repo_name
stringlengths 5
114
| repo_url
stringlengths 24
133
| snapshot_id
stringlengths 40
40
| revision_id
stringlengths 40
40
| directory_id
stringlengths 40
40
| branch_name
stringclasses 209
values | visit_date
timestamp[ns] | revision_date
timestamp[ns] | committer_date
timestamp[ns] | github_id
int64 9.83k
683M
⌀ | star_events_count
int64 0
22.6k
| fork_events_count
int64 0
4.15k
| gha_license_id
stringclasses 17
values | gha_created_at
timestamp[ns] | gha_updated_at
timestamp[ns] | gha_pushed_at
timestamp[ns] | gha_language
stringclasses 115
values | files
listlengths 1
13.2k
| num_files
int64 1
13.2k
|
---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|
re9ulus/moocs_std
|
https://github.com/re9ulus/moocs_std
|
c1e67b99da3eb894a9ef6ee4f16ae4e39735b6e5
|
29b14164fec969c3801c72b96357ee109246d642
|
0c44f2b1edcb50715eda693980a013970d777182
|
refs/heads/master
| 2021-06-14T08:36:42.658838 | 2020-12-05T17:08:19 | 2020-12-05T17:08:19 | 68,093,511 | 0 | 0 | null | null | null | null | null |
[
{
"alpha_fraction": 0.44140028953552246,
"alphanum_fraction": 0.4672755002975464,
"avg_line_length": 24.269229888916016,
"blob_id": "c0e4c4446b9fcd736342ffccd19ba71ac1c22677",
"content_id": "8dbc7da73ea5805ab80c8ec7a44d5c3073b4a9f2",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 657,
"license_type": "no_license",
"max_line_length": 60,
"num_lines": 26,
"path": "/coursera_algo_hse/c1/w5/knapsack.py",
"repo_name": "re9ulus/moocs_std",
"src_encoding": "UTF-8",
"text": "# Uses python2\nimport sys\n\ndef optimal_weight(W, w):\n # write your code here\n n = len(w)\n ar = [[0 for i in range(W + 1)] for j in range(n + 1)]\n\n for i in range(1, n + 1):\n for k in range(1, W + 1):\n cur_weight = k\n prev_val = 0\n if w[i-1] <= cur_weight:\n prev_val = ar[i-1][k-w[i-1]] + w[i-1]\n ar[i][k] = max([ar[i][k], prev_val, ar[i-1][k]])\n\n # for row in ar:\n # print row\n return ar[n][W]\n\n\nif __name__ == '__main__':\n input = sys.stdin.read()\n data = list(map(int, input.split()))\n W, n, w = data[0], data[1], data[2:]\n print(optimal_weight(W, w))\n"
},
{
"alpha_fraction": 0.47671231627464294,
"alphanum_fraction": 0.5095890164375305,
"avg_line_length": 21.8125,
"blob_id": "5c167e9e5ff5cc3c321da3e6f82ed43b3b0500d2",
"content_id": "d816affa793f998db892085fdb70710528f5fa78",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 365,
"license_type": "no_license",
"max_line_length": 43,
"num_lines": 16,
"path": "/coursera_algo_hse/c1/w2/fibonacci_huge.py",
"repo_name": "re9ulus/moocs_std",
"src_encoding": "UTF-8",
"text": "# Uses python3\nimport sys\n\ndef get_fibonaccihuge(n, m):\n if n <= 1:\n \treturn n\n ar = [0, 1, 1]\n while not(ar[-1] == 1 and ar[-2] == 0):\n \tar.append((ar[-1] + ar[-2]) % m)\n period = len(ar) - 2\n return ar[n % period]\n\nif __name__ == '__main__':\n input = sys.stdin.read();\n n, m = map(int, input.split())\n print(get_fibonaccihuge(n, m))\n"
},
{
"alpha_fraction": 0.47083333134651184,
"alphanum_fraction": 0.5,
"avg_line_length": 15,
"blob_id": "cdbe4d095ab03840a64cb961dd664abc9130fdcf",
"content_id": "cd2a2365ccb8c28a03ad72ceceb04f5c698d7fb6",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 240,
"license_type": "no_license",
"max_line_length": 29,
"num_lines": 15,
"path": "/coursera_algo_hse/c1/w3/change.py",
"repo_name": "re9ulus/moocs_std",
"src_encoding": "UTF-8",
"text": "# Uses python2\nimport sys\n\ndef get_change(m):\n #write your code here\n ans = m / 10\n m %= 10\n ans += m / 5\n m %= 5\n ans += m\n return ans\n\nif __name__ == '__main__':\n m = int(sys.stdin.read())\n print(get_change(m))\n"
},
{
"alpha_fraction": 0.519064724445343,
"alphanum_fraction": 0.5280575752258301,
"avg_line_length": 30.579545974731445,
"blob_id": "d453ca8cc71e82f4ff5ce07d259501c59b4412df",
"content_id": "96e2609658f1e8f3380d6154dcee63e3b9ae518f",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 2780,
"license_type": "no_license",
"max_line_length": 69,
"num_lines": 88,
"path": "/ods_ml/notebooks/hw_8/process.py",
"repo_name": "re9ulus/moocs_std",
"src_encoding": "UTF-8",
"text": "\nPOSSIBLE_TAGS = ['javascript', 'java', 'python', 'ruby',\\\n 'php', 'c++', 'c#', 'go', 'scala', 'swift']\n\nLABELS = {val: str(key + 1) for key, val in enumerate(POSSIBLE_TAGS)}\n\ndef prepare_text(text):\n text = text.strip()\n text = text.replace('|', '')\n text = text.replace(':', '')\n return text\n\ndef process_line(line):\n line = line.strip()\n parts = line.split('\\t')\n if len(parts) != 2 or not line[0] or not line[1]:\n return None, None\n text = prepare_text(parts[0])\n tags = parts[1].split()\n if not text or not tags:\n return None, None\n tags_found = 0\n res_tag = None\n for tag in POSSIBLE_TAGS:\n if tag in tags:\n tags_found += 1\n if tags_found > 1:\n res_tag = None\n break\n res_tag = tag\n return text, res_tag\n\n\ndef process_file(filename, target_filename):\n print('Start processing file')\n line_counter = 0\n with open(filename, 'r') as f_from:\n with open(target_filename, 'w+') as f_to:\n for line in f_from:\n text, tag = process_line(line)\n if text and tag:\n tag = LABELS[tag]\n f_to.write('{} |{}\\n'.format(tag, text))\n line_counter += 1\n print('Done, lines: {}'.format(line_counter))\n\n\ndef split_train_test(filename):\n print('Start split')\n train_size = 1463018\n ind = 0\n train_file = open('stackoverflow_train.vw', 'w+')\n test_file = open('stackoverflow_test.vw', 'w+')\n valid_file = open('stackoverflow_valid.vw', 'w+')\n train_labels = open('stackoverflow_train.txt', 'w+')\n test_labels = open('stackoverflow_test.txt', 'w+')\n valid_labels = open('stackoverflow_valid.txt', 'w+')\n\n with open(filename, 'r') as file_from:\n for line in file_from:\n tag = line.split('|')[0].strip()\n if ind < train_size:\n train_file.write(line)\n train_labels.write('{}\\n'.format(tag))\n elif ind < 2 * train_size:\n valid_file.write(line)\n valid_labels.write('{}\\n'.format(tag))\n else:\n test_file.write(line)\n test_labels.write('{}\\n'.format(tag))\n ind += 1\n if ind == train_size:\n print('== train done, writing valid')\n if ind == 2 * train_size:\n print('== valid done, writing test')\n\n train_file.close()\n test_file.close()\n valid_file.close()\n train_labels.close()\n test_labels.close()\n print('Done')\n\n\nif __name__ == '__main__':\n from_file = 'F:\\stack_overflow_data\\stackoverflow.10kk.tsv'\n to_file = 'input.vw'\n process_file(from_file, to_file)\n split_train_test(to_file)\n"
},
{
"alpha_fraction": 0.4841379225254059,
"alphanum_fraction": 0.5103448033332825,
"avg_line_length": 20.323530197143555,
"blob_id": "dbef6e8eadbdeb47bf1738e304adefb9cf3695ad",
"content_id": "01a0d749759bfc8ed193db64cc089a79d4eace26",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 725,
"license_type": "no_license",
"max_line_length": 59,
"num_lines": 34,
"path": "/coursera_algo_hse/c3/w2/acyclicity.py",
"repo_name": "re9ulus/moocs_std",
"src_encoding": "UTF-8",
"text": "#Uses python3\n\nimport sys\n\ndef dfs(adj, node):\n if color[node] == 2:\n return False\n elif color[node] == 1:\n return True\n color[node] = 1\n res = False\n for to in adj[node]:\n res |= dfs(adj, to)\n color[node] = 2\n return res\n\ndef acyclic(adj):\n res = False\n for i in range(n):\n res |= dfs(adj, i)\n return 1 if res else 0\n\nif __name__ == '__main__':\n input = sys.stdin.read()\n data = list(map(int, input.split()))\n n, m = data[0:2]\n data = data[2:]\n edges = list(zip(data[0:(2 * m):2], data[1:(2 * m):2]))\n adj = [[] for _ in range(n)]\n for (a, b) in edges:\n adj[a - 1].append(b - 1)\n\n color = [0 for i in range(n)]\n print(acyclic(adj))\n"
},
{
"alpha_fraction": 0.5109114050865173,
"alphanum_fraction": 0.5256739258766174,
"avg_line_length": 24.129032135009766,
"blob_id": "9b84f7406c0543c5cb367e9c7756c1db6dabc0d0",
"content_id": "6c83923b7455581355d3f558fe34319fdc92cbf2",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 1558,
"license_type": "no_license",
"max_line_length": 77,
"num_lines": 62,
"path": "/coursera_algo_hse/c3/w5/connecting_points.py",
"repo_name": "re9ulus/moocs_std",
"src_encoding": "UTF-8",
"text": "#Uses python3\nimport sys\nimport math\nfrom queue import PriorityQueue\n\n\ndef build_graph(x_coord, y_coord):\n G = {}\n counter = 0\n for i in range(len(x_coord)):\n for j in range(i+1, len(x_coord)):\n if i == j:\n continue\n node = ((x_coord[i], y_coord[i]), (x_coord[j], y_coord[j]))\n G[node] = math.sqrt((x_coord[i] - x_coord[j])**2 +\n (y_coord[i] - y_coord[j])**2)\n return G\n\n\ndef parent(disj_set, v):\n if disj_set[v] == v:\n return v\n disj_set[v] = parent(disj_set, disj_set[v])\n return disj_set[v]\n\n\ndef kruscal(G):\n disj_set = {}\n added_edges = set()\n edges = []\n for edge, val in G.items():\n edges.append((val,edge))\n if edge[0] not in disj_set:\n disj_set[edge[0]] = edge[0]\n if edge[1] not in disj_set:\n disj_set[edge[1]] = edge[1]\n edges = sorted(edges)\n for edge in edges:\n __, nodes = edge\n if parent(disj_set, nodes[0]) != parent(disj_set, nodes[1]):\n added_edges.add(edge)\n disj_set[parent(disj_set, nodes[0])] = parent(disj_set, nodes[1])\n return added_edges\n\n\ndef minimum_distance(x, y):\n result = 0.\n #write your code here\n g = build_graph(x, y)\n edges = kruscal(g)\n for cost, edge in edges:\n result += cost\n return result\n\n\nif __name__ == '__main__':\n input = sys.stdin.read()\n data = list(map(int, input.split()))\n n = data[0]\n x = data[1::2]\n y = data[2::2]\n print(\"{0:.9f}\".format(minimum_distance(x, y)))\n"
},
{
"alpha_fraction": 0.5912806391716003,
"alphanum_fraction": 0.6321526169776917,
"avg_line_length": 19.38888931274414,
"blob_id": "bc32f6388025861fb2aa67a1fec48d5ed08873b4",
"content_id": "db33a5e97a330e98812b12cd2a406e5bd699f88b",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 367,
"license_type": "no_license",
"max_line_length": 72,
"num_lines": 18,
"path": "/coursera_algo_hse/c1/w1/max_pairwise_prod.py",
"repo_name": "re9ulus/moocs_std",
"src_encoding": "UTF-8",
"text": "# Uses python3\n\nimport sys\n\ninput = sys.stdin.read()\ntokens = [int(i) for i in input.split()[1:]]\n\nmax_ind1 = 0\nfor i in range(len(tokens)):\n\tif tokens[i] >= tokens[max_ind1]:\n\t\tmax_ind1 = i\n\nmax_ind2 = -1\nfor i in range(len(tokens)):\n\tif ((max_ind2==-1) or tokens[i] >= tokens[max_ind2]) and i != max_ind1:\n\t\tmax_ind2 = i\n\nprint(tokens[max_ind1] * tokens[max_ind2])\n"
},
{
"alpha_fraction": 0.4568965435028076,
"alphanum_fraction": 0.484674334526062,
"avg_line_length": 22.200000762939453,
"blob_id": "2b8a13650c0d67f5fb4e72f89601c2f93dfd8610",
"content_id": "f073a32db9990d6e18b3d7d183206a529b11ebef",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 1044,
"license_type": "no_license",
"max_line_length": 52,
"num_lines": 45,
"path": "/coursera_algo_hse/c1/w4/points_and_segments.py",
"repo_name": "re9ulus/moocs_std",
"src_encoding": "UTF-8",
"text": "# Uses python2\nimport sys\n\ndef points_cmp(a, b):\n if a[0] != b[0]:\n return cmp(a[0], b[0])\n else:\n return cmp(a[1], b[1])\n\ndef naive_count_segments(starts, ends, points):\n items = []\n S, P, E = 0, 1, 2\n for p in starts:\n items.append((p, S))\n for p in points:\n items.append((p, P))\n for p in ends:\n items.append((p, E))\n items = sorted(items, cmp=points_cmp)\n cnt = 0\n points_cnt = {}\n for p in items:\n if p[1] == S:\n cnt += 1\n elif p[1] == P:\n points_cnt[p[0]] = cnt\n elif p[1] == E:\n cnt -= 1\n ans = []\n for p in points:\n ans.append(points_cnt[p])\n return ans\n\n\nif __name__ == '__main__':\n input = sys.stdin.read()\n data = list(map(int, input.split()))\n n = data[0]\n m = data[1]\n starts = data[2:2 * n + 2:2]\n ends = data[3:2 * n + 2:2]\n points = data[2 * n + 2:]\n #use fast_count_segments\n cnt = naive_count_segments(starts, ends, points)\n print(' '.join(map(str, cnt)))\n"
},
{
"alpha_fraction": 0.4486165940761566,
"alphanum_fraction": 0.5019763112068176,
"avg_line_length": 22,
"blob_id": "0e8f0faf1be2b8c7b272e9da579cceb35d445930",
"content_id": "a9c528897cbaea8a28b001db313000cc87c077ad",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 506,
"license_type": "no_license",
"max_line_length": 65,
"num_lines": 22,
"path": "/coursera_algo_hse/c1/w5/edit_distance.py",
"repo_name": "re9ulus/moocs_std",
"src_encoding": "UTF-8",
"text": "# Uses python2\n\ndef edit_distance(s, t):\n\tar = [[0 for j in range(len(s) + 1)] for i in range(len(t) + 1)]\n\n\tfor i in range(1, len(t) + 1):\n\t\tar[i][0] = i\n\tfor j in range(1, len(s) + 1):\n\t\tar[0][j] = j\n\n\tfor i in range(1, len(t) + 1):\n\t\tfor j in range(1, len(s) + 1):\n\t\t\tdiag = ar[i-1][j-1]\n\t\t\tif t[i-1] != s[j-1]:\n\t\t\t\tdiag += 1\n\t\t\tar[i][j] = min(ar[i-1][j] + 1, ar[i][j-1] + 1, diag)\n\treturn ar[len(t)][len(s)]\n\nif __name__ == \"__main__\":\n\ts1 = raw_input()\n\ts2 = raw_input()\n\tprint(edit_distance(s1, s2))\n"
},
{
"alpha_fraction": 0.5249999761581421,
"alphanum_fraction": 0.5441666841506958,
"avg_line_length": 23.489795684814453,
"blob_id": "685bfa9080e61d48bd9e66eadabec155913eabc6",
"content_id": "cd823b0f2581082a5e6ef5268fe2778c973490c9",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 1200,
"license_type": "no_license",
"max_line_length": 59,
"num_lines": 49,
"path": "/coursera_algo_hse/c3/w2/strongly_connected.py",
"repo_name": "re9ulus/moocs_std",
"src_encoding": "UTF-8",
"text": "#Uses python3\n\nimport sys\n\nsys.setrecursionlimit(200000)\n\ndef dfs(adj, times, visited, x):\n global time\n if visited[x]:\n return\n visited[x] = True\n for to in adj[x]:\n dfs(adj, times, visited, to)\n time += 1\n times.append((time, x))\n\n\ndef number_of_strongly_connected_components(adj):\n res = 0\n #write your code here\n rev_adj = [[] for i in range(len(adj))]\n for key in range(len(adj)):\n for node in adj[key]:\n rev_adj[node].append(key)\n times = []\n visited = [False] * n\n for i in range(len(adj)):\n dfs(rev_adj, times, visited, i)\n times = [y for x, y in sorted(times, reverse=True)]\n res = 0\n visited = [False] * n\n for node in times:\n if visited[node]:\n continue\n res += 1\n dfs(adj, [], visited, node)\n return res\n\nif __name__ == '__main__':\n input = sys.stdin.read()\n data = list(map(int, input.split()))\n n, m = data[0:2]\n data = data[2:]\n edges = list(zip(data[0:(2 * m):2], data[1:(2 * m):2]))\n adj = [[] for _ in range(n)]\n time = 0\n for (a, b) in edges:\n adj[a - 1].append(b - 1)\n print(number_of_strongly_connected_components(adj))\n"
},
{
"alpha_fraction": 0.4836852252483368,
"alphanum_fraction": 0.4913627505302429,
"avg_line_length": 19.84000015258789,
"blob_id": "4c1c3a056397607bb005498c3fb41cf08392fd5f",
"content_id": "c7354ff6c160bc6f19c012b3c7a62cba2f27335a",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 521,
"license_type": "no_license",
"max_line_length": 42,
"num_lines": 25,
"path": "/coursera_algo_hse/c1/w5/placing_parentheses.py",
"repo_name": "re9ulus/moocs_std",
"src_encoding": "UTF-8",
"text": "# Uses python2\ndef evalt(a, b, op):\n if op == '+':\n return a + b\n elif op == '-':\n return a - b\n elif op == '*':\n return a * b\n else:\n assert False\n\ndef get_maximum_value(dataset):\n #write your code here\n digits = []\n operators = []\n for i in range(len(dataset)):\n if i % 2 == 0:\n digits.append(int(dataset[i]))\n else:\n operators.append(dataset[i])\n return 0\n\n\nif __name__ == \"__main__\":\n print(get_maximum_value(raw_input()))\n"
},
{
"alpha_fraction": 0.4738371968269348,
"alphanum_fraction": 0.5087209343910217,
"avg_line_length": 17.105262756347656,
"blob_id": "baa382933e88cfd5b1c6eb07dcd6051f51de6b09",
"content_id": "03b3e427433229cdc4f4f73a9fe969feed645d34",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 344,
"license_type": "no_license",
"max_line_length": 38,
"num_lines": 19,
"path": "/coursera_algo_hse/c1/w2/fibonacci_last_digit.py",
"repo_name": "re9ulus/moocs_std",
"src_encoding": "UTF-8",
"text": "# Uses python3\nimport sys\n\n\ndef get_fibonacci_last_digit(n):\n if n <= 1:\n \treturn n\n n += 1\n ar = [0 for i in range(n)]\n ar[1] = 1\n for i in range(2, n):\n \tar[i] = (ar[i-1] + ar[i-2]) % 10\n return ar[-1]\n\n\nif __name__ == '__main__':\n input = sys.stdin.read()\n n = int(input)\n print(get_fibonacci_last_digit(n))\n"
},
{
"alpha_fraction": 0.4576502740383148,
"alphanum_fraction": 0.505464494228363,
"avg_line_length": 21.18181800842285,
"blob_id": "ff5c6567f9fff32ac5c8b6f11625a07f9b3e5b72",
"content_id": "e457347c76755753409050bc8d5a80198430121d",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 732,
"license_type": "no_license",
"max_line_length": 40,
"num_lines": 33,
"path": "/coursera_algo_hse/c1/w5/primitive_calculator.py",
"repo_name": "re9ulus/moocs_std",
"src_encoding": "UTF-8",
"text": "# Uses python2\nimport sys\n\n\ndef optimal_sequence(n):\n\tar = [n+1 for i in range(n+1)]\n\tar[0], ar[1] = 1, 1\n\tfor i in range(1, n+1):\n\t\tif i * 3 <= n:\n\t\t\tar[i * 3] = min(ar[i * 3], ar[i] + 1)\n\t\tif i * 2 <= n:\n\t\t\tar[i * 2] = min(ar[i * 2], ar[i] + 1)\n\t\tif i + 1 <= n:\n\t\t\tar[i + 1] = min(ar[i + 1], ar[i] + 1)\n\ti = n\n\tseq = []\n\twhile i >= 1:\n\t\tseq.append(i)\n\t\tmin_val, next_i = ar[i - 1], i - 1\n\t\tif i % 3 == 0 and ar[i / 3] < min_val:\n\t\t\tmin_val, next_i = ar[i / 3], i / 3\n\t\tif i % 2 == 0 and ar[i / 2] < min_val:\n\t\t\tmin_val, next_i = ar[i / 2], i / 2\n\t\ti = next_i\n\treturn list(reversed(seq))\n\n\ninput = sys.stdin.read()\nn = int(input)\nsequence = optimal_sequence(n)\nprint(len(sequence) - 1)\nans = ' '.join(map(str, sequence))\nprint ans\n"
},
{
"alpha_fraction": 0.49762389063835144,
"alphanum_fraction": 0.5125594139099121,
"avg_line_length": 29.06122398376465,
"blob_id": "497817f292ed5a9330eb82cab7695b2e9621ccb3",
"content_id": "7f623c005392096fcaffbe213bb82819a8b3a1d5",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 1473,
"license_type": "no_license",
"max_line_length": 95,
"num_lines": 49,
"path": "/coursera_algo_hse/c3/w4/negative_cycle.py",
"repo_name": "re9ulus/moocs_std",
"src_encoding": "UTF-8",
"text": "#Uses python3\n\nimport sys\n\ndef bellman_ford(G, cost, visited, start):\n size = len(G)\n visited_costs = [float('inf') for i in range(size)]\n visited_costs[start] = 0\n was_relaxed = True\n\n for i in range(size):\n if not was_relaxed:\n break\n was_relaxed = False\n for node_from in range(size):\n for ind_to, node_to in enumerate(G[node_from]):\n if visited_costs[node_to] > visited_costs[node_from] + cost[node_from][ind_to]:\n visited_costs[node_to] = visited_costs[node_from] + cost[node_from][ind_to]\n visited[node_from] = True\n was_relaxed = True\n if i == size - 1:\n return 1\n return 0\n\n\ndef negative_cycle(adj, cost):\n visited = [False for i in range(len(adj))]\n res = 0\n for start in range(len(adj)):\n if not visited[start]:\n res = bellman_ford(adj, cost, visited, start)\n if res:\n break\n return res\n\n\nif __name__ == '__main__':\n input = sys.stdin.read()\n data = list(map(int, input.split()))\n n, m = data[0:2]\n data = data[2:]\n edges = list(zip(zip(data[0:(3 * m):3], data[1:(3 * m):3]), data[2:(3 * m):3]))\n data = data[3 * m:]\n adj = [[] for _ in range(n)]\n cost = [[] for _ in range(n)]\n for ((a, b), w) in edges:\n adj[a - 1].append(b - 1)\n cost[a - 1].append(w)\n print(negative_cycle(adj, cost))\n"
},
{
"alpha_fraction": 0.48563969135284424,
"alphanum_fraction": 0.5039164423942566,
"avg_line_length": 24.53333282470703,
"blob_id": "37fbd926fb0ea366f0fee32f3aa431094219dc4a",
"content_id": "63906422b4da7e1d18e0dc5177306640d01d5b1b",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 1149,
"license_type": "no_license",
"max_line_length": 60,
"num_lines": 45,
"path": "/coursera_algo_hse/c1/w4/inversions.py",
"repo_name": "re9ulus/moocs_std",
"src_encoding": "UTF-8",
"text": "# Uses python2\nimport sys\n\n\ndef merge(a, left, mid, right):\n ar_left = a[left:mid+1]\n ar_right = a[mid+1:right+1]\n inv = 0\n i = left\n i_left, i_right = 0, 0\n while i_left < len(ar_left) and i_right < len(ar_right):\n if ar_left[i_left] <= ar_right[i_right]:\n a[i] = ar_left[i_left]\n i_left += 1\n else:\n a[i] = ar_right[i_right]\n i_right += 1\n inv += len(ar_left) - i_left\n i += 1\n while i_left < len(ar_left):\n a[i] = ar_left[i_left]\n i += 1\n i_left += 1\n while i_right < len(ar_right):\n a[i] = ar_right[i_right]\n i += 1\n i_right += 1\n return inv\n\n\ndef get_number_of_inversions(a, left, right):\n if left >= right:\n return 0\n mid = left + (right - left) / 2\n inv = get_number_of_inversions(a, left, mid)\n inv += get_number_of_inversions(a, mid+1, right)\n inv += merge(a, left, mid, right)\n return inv\n\n\nif __name__ == '__main__':\n input = sys.stdin.read()\n data = list(map(int, input.split()))\n n, a = data[0], data[1:]\n print(get_number_of_inversions(a, 0, len(a)-1))\n"
},
{
"alpha_fraction": 0.5528846383094788,
"alphanum_fraction": 0.5837339758872986,
"avg_line_length": 22.327102661132812,
"blob_id": "a2a59b9b583d7bee0ecddcf8479c977fd29ccfc1",
"content_id": "f0bf6f6b560c9f0cf4232dd35ae574560b18f184",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "R",
"length_bytes": 2496,
"license_type": "no_license",
"max_line_length": 158,
"num_lines": 107,
"path": "/stepic_stat_2/w1/w1_tasks.r",
"repo_name": "re9ulus/moocs_std",
"src_encoding": "UTF-8",
"text": "# Stepic, statistics 2, week1 tasks\n\n# Task 2, Done\nNA_position <- function(x, y) {\n all(is.na(x) == is.na(y))\n}\n\n# Task 3, Done\nsmart_test <- function(df) {\n t <- table(df)\n if (all(t >= 5)) {\n test.result <- chisq.test(t)\n result <- c(test.result$statistic,\n test.result$parameter,\n test.result$p.value)\n } else {\n result <- fisher.test(t)$p.value\n }\n result\n}\n\n# smart_test(mtcars[1:20, c(\"am\", \"vs\")])\n# 0.628483\n\n# smart_test(mtcars[,c(\"am\", \"vs\")])\n# 0.3475355 1.0000000 0.5555115 \n\n\n# Task 4, Done\nmost_significant <- function(df) {\n p.values <- apply(df, 2, function(x) chisq.test(table(x))$p.value)\n min.p.value <- min(p.values)\n colnames(df)[p.values == min.p.value]\n}\n\n# test_data <- read.csv(\"https://stepic.org/media/attachments/course/524/test_data.csv\", stringsAsFactors = F)\n# most_significant(test_data)\n\n\n# Task 5, Done\nmeans <- apply(iris[, 1:4], 2, mean)\niris$important_cases <- as.factor(ifelse((iris[, 1] > means[1]) + (iris[, 2] > means[2]) + (iris[, 3] > means[3]) + (iris[, 4] > means[4]) >= 3, \"Yes\", \"No\"))\n\n\n# Task 6, Done\nget_important_cases <- function(df) {\n means <- apply(df, 2, mean)\n important.items <- as.integer(df[, 1] > means[1])\n for(i in 2:length(means)) {\n important.items <- important.items + as.integer(df[, i] > means[i])\n }\n print(important.items)\n df$important_cases <- factor(ifelse((important.items >= (length(means) %/% 2 + 1)), \"Yes\", \"No\"), levels=c(\"Yes\", \"No\"))\n df\n}\n\n\n# Task 7, Done\nstat_mode <- function(v) {\n t <- table(v)\n max_val <- max(t)\n as.integer(names(t)[which(t == max_val)])\n}\n\n\n# Task 8, Done\n?chisq.test\n\nmax_resid <- function(df) {\n t <- chisq.test(table(df))$stdres\n max_val <- max(t)\n \n found = FALSE\n target_row = 0\n target_col = 0\n for(row in 1:nrow(t)) {\n for(col in 1:ncol(t)) {\n if (t[row, col] == max_val) {\n target_row = row\n target_col = col\n found = TRUE\n break\n } \n }\n if (found) {\n break\n }\n }\n \n c(rownames(t)[target_row], colnames(t)[target_col])\n}\n\n# Accurate solution\nmax_resid <- function(test_data){ \n d <- table(test_data) \n chi <- chisq.test(d) \n ind <- which(chi$stdres==max(chi$stdres), arr.ind = T) \n return(c(row.names(d)[ind[1]],colnames(d)[ind[2]])) \n}\n\ntest_data <- read.csv(\"https://stepic.org/media/attachments/course/524/test_drugs.csv\")\n\n\n# Task 9, Done\nlibrary(\"ggplot2\")\nggplot(diamonds, aes(x=color, fill=cut)) +\n geom_bar(position='dodge')\n"
},
{
"alpha_fraction": 0.5019546747207642,
"alphanum_fraction": 0.523064911365509,
"avg_line_length": 29.452381134033203,
"blob_id": "43cf9f6052920025491122e340cf8821a41e8498",
"content_id": "c024ef12eac8c896e8d86d250836982ede4be1dc",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 1279,
"license_type": "no_license",
"max_line_length": 83,
"num_lines": 42,
"path": "/coursera_algo_hse/c3/w4/dijkstra.py",
"repo_name": "re9ulus/moocs_std",
"src_encoding": "UTF-8",
"text": "#Uses python3\n\nimport sys\nimport heapq\n\n\ndef distance(adj, cost, s, t):\n #write your code here\n finished, q, visited_costs = set(), [], [10**9 for i in range(len(adj))]\n visited_costs[s] = 0\n heapq.heappush(q, (0, s))\n while len(q):\n node_cost, node = heapq.heappop(q)\n if node in finished:\n continue\n finished.add(node)\n if node == t:\n return visited_costs[node]\n for target_node, edge_cost in zip(adj[node], cost[node]):\n if target_node in finished:\n continue\n target_cost = node_cost + edge_cost\n if target_cost < visited_costs[target_node]:\n visited_costs[target_node] = target_cost\n heapq.heappush(q, (target_cost, target_node))\n return -1\n\n\nif __name__ == '__main__':\n input = sys.stdin.read()\n data = list(map(int, input.split()))\n n, m = data[0:2]\n data = data[2:]\n edges = list(zip(zip(data[0:(3 * m):3], data[1:(3 * m):3]), data[2:(3 * m):3]))\n data = data[3 * m:]\n adj = [[] for _ in range(n)]\n cost = [[] for _ in range(n)]\n for ((a, b), w) in edges:\n adj[a - 1].append(b - 1)\n cost[a - 1].append(w)\n s, t = data[0] - 1, data[1] - 1\n print(distance(adj, cost, s, t))\n"
},
{
"alpha_fraction": 0.5927889943122864,
"alphanum_fraction": 0.6023330092430115,
"avg_line_length": 26.735294342041016,
"blob_id": "ebd1423f56144dd12500d5ed6d68240adb114c71",
"content_id": "dabf4cabad1f37672dd86339777e83ad398a09aa",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 1886,
"license_type": "no_license",
"max_line_length": 79,
"num_lines": 68,
"path": "/coursera_algo_hse/c2/w3/build_heap.py",
"repo_name": "re9ulus/moocs_std",
"src_encoding": "UTF-8",
"text": "# python2\n\nclass HeapBuilder:\n def __init__(self):\n self._swaps = []\n self._data = []\n self.size = 0\n\n def ReadData(self):\n n = int(raw_input())\n self._data = [0] + [int(s) for s in raw_input().split()]\n self.size = n\n assert n == len(self._data) - 1\n\n def WriteResponse(self):\n print(len(self._swaps))\n for swap in self._swaps:\n print('{0} {1}'.format(swap[0], swap[1]))\n\n @staticmethod\n def Parent(i):\n return i / 2\n\n @staticmethod\n def LeftChild(i):\n return i * 2\n\n @staticmethod\n def RightChild(i):\n return i * 2 + 1\n\n def SiftDown(self, i):\n maxIndex = i\n left, right = self.LeftChild(i), self.RightChild(i)\n if left <= self.size and self._data[left] < self._data[maxIndex]:\n maxIndex = left\n if right <= self.size and self._data[right] < self._data[maxIndex]:\n maxIndex = right\n if i != maxIndex:\n self._swaps.append((i-1, maxIndex-1))\n self._data[i], self._data[maxIndex] = self._data[maxIndex], self._data[i]\n self.SiftDown(maxIndex)\n\n def GenerateSwaps(self):\n # The following naive implementation just sorts \n # the given sequence using selection sort algorithm\n # and saves the resulting sequence of swaps.\n # This turns the given array into a heap, \n # but in the worst case gives a quadratic number of swaps.\n #\n # TODO: replace by a more efficient implementation\n # for i in range(len(self._data)):\n # for j in range(i + 1, len(self._data)):\n # if self._data[i] > self._data[j]:\n # self._swaps.append((i, j))\n # self._data[i], self._data[j] = self._data[j], self._data[i]\n for i in reversed(range(1, len(self._data)/2+1)):\n self.SiftDown(i)\n\n\n def Solve(self):\n self.ReadData()\n self.GenerateSwaps()\n self.WriteResponse()\n\nif __name__ == '__main__':\n heap_builder = HeapBuilder()\n heap_builder.Solve()\n"
},
{
"alpha_fraction": 0.4804905354976654,
"alphanum_fraction": 0.5061315298080444,
"avg_line_length": 24.628570556640625,
"blob_id": "3db8ca146c3e47ac2edac5c49f6a7e0edbcb001a",
"content_id": "ffbfaefd44754f5b853d64243a1ba645922a3e5b",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 897,
"license_type": "no_license",
"max_line_length": 51,
"num_lines": 35,
"path": "/coursera_algo_hse/c1/w4/majority_element.py",
"repo_name": "re9ulus/moocs_std",
"src_encoding": "UTF-8",
"text": "# Uses python2\nimport sys\n\n\ndef get_majority_element(a, left, right):\n if left == right:\n return -1\n if left + 1 == right:\n return a[left]\n mid = left + (right - left) / 2\n maj_left = get_majority_element(a, left, mid-1)\n maj_right = get_majority_element(a, mid, right)\n cnt_left, cnt_right = 0, 0\n if maj_left != -1 or maj_right != -1:\n for i in a[left: right+1]:\n if i == maj_left:\n cnt_left+=1\n elif i == maj_right:\n cnt_right+=1\n fall = (right - left + 1) / 2 + 1\n if cnt_left >= fall:\n return maj_left\n if cnt_right >= fall:\n return maj_right\n return -1\n\n\nif __name__ == '__main__':\n input = sys.stdin.read()\n data = list(map(int, input.split()))\n n, a = data[0], data[1:]\n if get_majority_element(a, 0, n-1) != -1:\n print(1)\n else:\n print(0)\n"
},
{
"alpha_fraction": 0.5735887289047241,
"alphanum_fraction": 0.5836693644523621,
"avg_line_length": 25.810810089111328,
"blob_id": "9f9b29b93287df6edaa62d4d0ae6a8f61e38b1cb",
"content_id": "c12807212b196f977f572310626e2d6899574d49",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 992,
"license_type": "no_license",
"max_line_length": 69,
"num_lines": 37,
"path": "/coursera_algo_hse/c2/w1/tree-height.py",
"repo_name": "re9ulus/moocs_std",
"src_encoding": "UTF-8",
"text": "# python3\n\nimport sys, threading\nsys.setrecursionlimit(10**7) # max depth of recursion\nthreading.stack_size(2**27) # new thread will get stack of such size\n\n\nclass TreeHeight:\n def read(self):\n self.n = int(sys.stdin.readline())\n self.parent = list(map(int, sys.stdin.readline().split()))\n self.g = {}\n for child in range(len(self.parent)):\n p = self.parent[child]\n if p in self.g:\n self.g[p].append(child)\n else:\n self.g[p] = [child]\n\n def compute_height_recursive(self, elem, h=0):\n if elem not in self.g:\n return h\n heights = []\n for node in self.g[elem]:\n heights.append(self.compute_height_recursive(node, h+1))\n return max(heights)\n\n def compute_height(self):\n return self.compute_height_recursive(-1)\n\n\ndef main():\n tree = TreeHeight()\n tree.read()\n print(tree.compute_height())\n\nthreading.Thread(target=main).start()\n"
},
{
"alpha_fraction": 0.5930018424987793,
"alphanum_fraction": 0.6114180684089661,
"avg_line_length": 20.719999313354492,
"blob_id": "32ff6296608858882d2ff93a5bd3700f979fb25e",
"content_id": "f5e5360d648fdbe5f36b35db7e56f469b85b121e",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 1086,
"license_type": "no_license",
"max_line_length": 76,
"num_lines": 50,
"path": "/coursera_algo_hse/c2/w4/hash_substring.py",
"repo_name": "re9ulus/moocs_std",
"src_encoding": "UTF-8",
"text": "# python2\n\ndef read_input():\n return (raw_input().rstrip(), raw_input().rstrip())\n\n\ndef print_occurrences(output):\n print(' '.join(map(str, output)))\n\n\ndef poly_hash(s, prime, x):\n\th = 0\n\tfor c in reversed(s):\n\t\th = (h * x + ord(c)) % prime\n\treturn h\n\n\ndef precompute_hashes(text, pattern_length, p, x):\n\tsize = len(text) - pattern_length\n\tH = [0 for i in range(size + 1)]\n\tS = text[size:]\n\tH[-1] = poly_hash(S, p, x)\n\ty = 1\n\tfor i in range(pattern_length):\n\t\ty = (y * x) % p\n\tfor i in reversed(range(0, size)):\n\t\tH[i] = (x * H[i+1] + ord(text[i]) - y * ord(text[i + pattern_length])) % p\n\treturn H\n\n\ndef rabin_karp(text, pattern):\n\tp = 15487457\n\tx = 100 #absolutly random\n\tresult = []\n\tp_hash = poly_hash(pattern, p, x)\n\tH = precompute_hashes(text, len(pattern), p, x)\n\tfor i in range(len(text) - len(pattern) + 1):\n\t\tif p_hash != H[i]:\n\t\t\tcontinue\n\t\tif text[i:i+len(pattern)] == pattern:\n\t\t\tresult.append(i)\n\treturn result\n\n\ndef get_occurrences(pattern, text):\n\treturn rabin_karp(text, pattern)\n\n\nif __name__ == '__main__':\n print_occurrences(get_occurrences(*read_input()))\n"
},
{
"alpha_fraction": 0.5058661103248596,
"alphanum_fraction": 0.5148378014564514,
"avg_line_length": 23.559322357177734,
"blob_id": "df6bb0816e2c3e589e8c7ffeb138384bf7171024",
"content_id": "8d1b0126acb47d49096493a7232510599df8b71f",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 1449,
"license_type": "no_license",
"max_line_length": 60,
"num_lines": 59,
"path": "/coursera_algo_hse/c4/w1/suffix_tree.py",
"repo_name": "re9ulus/moocs_std",
"src_encoding": "UTF-8",
"text": "# python3\nimport sys\n\nsys.setrecursionlimit(10000)\n\nclass Node():\n\n def __init__(self):\n self.next = {}\n\n\ndef build_tree_rec(tree, text):\n if len(text) == 0:\n return\n splited = False\n for edge in tree.next:\n if edge[0] == text[0]:\n splited = True\n i = 1\n while i < len(text) and i < len(edge):\n if text[i] != edge[i]:\n break\n i += 1\n if i < len(edge):\n pref, suf = edge[:i], edge[i:]\n tree.next[pref] = Node()\n tree.next[pref].next[suf] = tree.next[edge]\n del tree.next[edge]\n build_tree_rec(tree.next[text[:i]], text[i:])\n break\n if not splited:\n tree.next[text] = Node()\n\n\ndef travers(tree, res, depth=1):\n for key in tree.next:\n # print(depth * '_' + key)\n travers(tree.next[key], res, depth+1)\n res.append(key)\n\n\ndef build_suffix_tree(text):\n \"\"\"\n Build a suffix tree of the string text and return a list\n with all of the labels of its edges (the corresponding\n substrings of the text) in any order.\n \"\"\"\n result = []\n tree = Node()\n for i in range(len(text)):\n build_tree_rec(tree, text[i:])\n travers(tree, result)\n return result\n\n\nif __name__ == '__main__':\n text = sys.stdin.readline().strip()\n result = build_suffix_tree(text)\n print(\"\\n\".join(result))\n"
},
{
"alpha_fraction": 0.6228652596473694,
"alphanum_fraction": 0.6299810409545898,
"avg_line_length": 20.29292869567871,
"blob_id": "4aa9ac80fe3b1d43948d5052bcd50a52c7b938ea",
"content_id": "2206bed6e3f5b1ef19afb210504c22acab9f35e4",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Go",
"length_bytes": 2120,
"license_type": "no_license",
"max_line_length": 94,
"num_lines": 99,
"path": "/golang/tree_util.go",
"repo_name": "re9ulus/moocs_std",
"src_encoding": "UTF-8",
"text": "// Coursera golang hw1\npackage main\n\nimport (\n\t\"fmt\"\n\t\"io\"\n\t\"os\"\n\t\"path/filepath\"\n\t\"sort\"\n\t\"strconv\"\n)\n\nfunc getFileSize(filepath string) int64 {\n\tdescriptor, err := os.Stat(filepath)\n\tif err != nil {\n\t\tpanic(err)\n\t}\n\treturn descriptor.Size()\n}\n\nfunc isDirectory(path string) (bool, error) {\n\tfileInfo, err := os.Stat(path)\n\treturn fileInfo.IsDir(), err\n}\n\nfunc getPrefix(isLast bool) string {\n\tch := \"├\"\n\tif isLast {\n\t\tch = \"└\"\n\t}\n\treturn ch + \"───\"\n}\n\nfunc getLine(path string, prefix string, isLast bool) string {\n\tfilename := filepath.Base(path)\n\tif isDir, _ := isDirectory(path); !isDir {\n\t\tfilesize := getFileSize(path)\n\t\tif filesize > 0 {\n\t\t\tfilename += \" (\" + strconv.FormatInt(filesize, 10) + \"b)\"\n\t\t} else {\n\t\t\tfilename += \" (empty)\"\n\t\t}\n\t}\n\treturn prefix + getPrefix(isLast) + filename + \"\\n\"\n}\n\nfunc filter(paths []string, predicate func(string) bool ) []string {\n\tfiltered := make([]string, 0)\n\tfor _, path := range paths {\n\t\tif predicate(path) {\n\t\t\tfiltered = append(filtered, path)\n\t\t}\n\t}\n\treturn filtered\n}\n\nfunc recDirTree(out io.Writer, path string, printFiled bool, depth int, prefix string) error {\n\tcurrentItems, err := filepath.Glob(path + \"/*\")\n\tif err != nil {\n\t\tfmt.Println(err)\n\t}\n\tif !printFiled {\n\t\tcurrentItems = filter(currentItems, func(st string) bool {\n\t\t\tisDir, _ := isDirectory(st)\n\t\t\treturn isDir\n\t\t})\n\t}\n\tsort.Strings(currentItems)\n\tfor idx, item := range currentItems {\n\t\tisLast := idx == len(currentItems) - 1\n\t\tout.Write([]byte(getLine(item, prefix, isLast)))\n\t\tif isDir, _ := isDirectory(item); isDir {\n\t\t\tnewPrefix := prefix\n\t\t\tif !isLast {\n\t\t\t\tnewPrefix += \"│\"\n\t\t\t}\n\t\t\tnewPrefix += \"\\t\"\n\t\t\trecDirTree(out, item, printFiled, depth + 1, newPrefix)\n\t\t}\n\t}\n\treturn nil\n}\n\nfunc dirTree(out io.Writer, path string, printFiled bool) error {\n\treturn recDirTree(out, path, printFiled, 0, \"\")\n}\n\nfunc main() {\n\tout := os.Stdout\n\tif !(len(os.Args) == 2 || len(os.Args) == 3) {\n\t\tpanic(\"usage go run main.go . [-f]\")\n\t}\n\tpath := os.Args[1]\n\tprintFiles := len(os.Args) == 3 && os.Args[2] == \"-f\"\n\terr := dirTree(out, path, printFiles)\n\tif err != nil {\n\t\tpanic(err.Error())\n\t}\n}\n"
},
{
"alpha_fraction": 0.5646161437034607,
"alphanum_fraction": 0.5708154439926147,
"avg_line_length": 30.28358268737793,
"blob_id": "3f69962a9bf81f3c615ef772bce294b5e9ee5f80",
"content_id": "335b1636dde6e793497785a865789e6432d70052",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 2097,
"license_type": "no_license",
"max_line_length": 99,
"num_lines": 67,
"path": "/coursera_algo_hse/c2/w3/job_queue.py",
"repo_name": "re9ulus/moocs_std",
"src_encoding": "UTF-8",
"text": "# python3\n\nclass Worker:\n\n def __init__(self, free_time, index):\n self.free_time = free_time\n self.index = index\n\n def __lt__(self, other):\n if self.free_time == other.free_time:\n return self.index < other.index\n else:\n return self.free_time < other.free_time\n\n def __eq__(self, other):\n return self.free_time == other.free_time and self.free_time == other.free_time\n\n\nclass JobQueue:\n def read_data(self):\n self.num_workers, m = map(int, input().split())\n self.jobs = list(map(int, input().split()))\n self.workers = [Worker(0, i-1) for i in range(self.num_workers+1)]\n assert m == len(self.jobs)\n\n def write_response(self):\n for i in range(len(self.jobs)):\n print(self.assigned_workers[i], self.start_times[i])\n\n def left(self, i):\n return i * 2\n\n def right(self, i):\n return i * 2 + 1\n\n def sift_down(self, i):\n min_index = i\n left_index, right_index = self.left(i), self.right(i)\n if left_index <= self.num_workers and self.workers[left_index] < self.workers[min_index]:\n min_index = left_index\n if right_index <= self.num_workers and self.workers[right_index] < self.workers[min_index]:\n min_index = right_index\n if min_index != i:\n self.workers[min_index], self.workers[i] = self.workers[i], self.workers[min_index]\n self.sift_down(min_index)\n\n def assign(self, p):\n self.workers[1].free_time += p\n self.sift_down(1)\n\n def assign_jobs(self):\n size = len(self.jobs)\n self.assigned_workers = [0 for i in range(size)]\n self.start_times = [0 for i in range(size)]\n for i in range(size):\n self.assigned_workers[i] = self.workers[1].index\n self.start_times[i] = self.workers[1].free_time\n self.assign(self.jobs[i])\n\n def solve(self):\n self.read_data()\n self.assign_jobs()\n self.write_response()\n\nif __name__ == '__main__':\n job_queue = JobQueue()\n job_queue.solve()\n\n"
},
{
"alpha_fraction": 0.4177215099334717,
"alphanum_fraction": 0.4599156081676483,
"avg_line_length": 13.8125,
"blob_id": "39d6338fd4042f5af48eda6a92cb7f6664e52fef",
"content_id": "0fd307bdeefc9e151acbc6b7fb9c22eff68ca2d8",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 237,
"license_type": "no_license",
"max_line_length": 30,
"num_lines": 16,
"path": "/coursera_algo_hse/c1/w2/fib.py",
"repo_name": "re9ulus/moocs_std",
"src_encoding": "UTF-8",
"text": "# Uses python3\n\n\ndef calc_fib(n):\n if (n <= 1):\n return n\n n += 1\n ar = [0 for i in range(n)]\n ar[1] = 1\n for i in range(2, n):\n \tar[i] = ar[i-1] + ar[i-2]\n return ar[-1]\n\n\nn = int(input())\nprint(calc_fib(n))\n"
},
{
"alpha_fraction": 0.6241071224212646,
"alphanum_fraction": 0.6276785731315613,
"avg_line_length": 16.230770111083984,
"blob_id": "cba59218a9f7b7d1d1dd5e8a0ca249f372d54625",
"content_id": "f6d2d7d4debe8f8f70d498f79e43d0d9fd14d009",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 1120,
"license_type": "no_license",
"max_line_length": 51,
"num_lines": 65,
"path": "/coursera_algo_hse/c4/w1/trie_matching_extended.py",
"repo_name": "re9ulus/moocs_std",
"src_encoding": "UTF-8",
"text": "# python3\nimport sys\n\n\nclass Node:\n\tdef __init__(self):\n\t\tself.next = {}\n\t\tself.is_pattern = False\n\n\ndef build_trie_rec(trie, pat):\n\tif len(pat) == 0:\n\t\ttrie.is_pattern = True\n\t\treturn\n\tc = pat[0]\n\tif c in trie.next:\n\t\tbuild_trie_rec(trie.next[c], pat[1:])\n\telse:\n\t\tcur_node = trie\n\t\tfor c in pat:\n\t\t\tn = Node()\n\t\t\tcur_node.next[c] = n\n\t\t\tcur_node = n\n\t\tcur_node.is_pattern = True\n\n\ndef build_trie(patterns):\n\ttree = Node()\n\tfor pat in patterns:\n\t\tmax_ind = build_trie_rec(tree, pat)\n\treturn tree\n\n\ndef trie_match(text, node):\n\tfor c in text:\n\t\tif node.is_pattern:\n\t\t\tbreak\n\t\tif c in node.next:\n\t\t\tnode = node.next[c]\n\t\telse:\n\t\t\tbreak\n\tif node.is_pattern:\n\t\treturn True\n\treturn False\n\n\ndef solve (text, patterns):\n\tresult = []\n\n\t# write your code here\n\ttrie = build_trie(patterns)\n\tfor i in range(len(text)):\n\t\tif trie_match(text[i:], trie):\n\t\t\tresult.append(i)\n\treturn result\n\ntext = sys.stdin.readline ().strip ()\nn = int (sys.stdin.readline ().strip ())\npatterns = []\nfor i in range (n):\n\tpatterns += [sys.stdin.readline ().strip ()]\n\nans = solve (text, patterns)\n\nsys.stdout.write (' '.join (map (str, ans)) + '\\n')\n"
}
] | 26 |
galazat/Simulation-of-the-exponential-infection_Python
|
https://github.com/galazat/Simulation-of-the-exponential-infection_Python
|
50e8a88047b06490763fa35e0fe6fa8484a5de00
|
2f6f55274a84dc1740b13391a08e34e1fa5bf354
|
941251f3bf9d9fd777a411663990e6a4e2686a99
|
refs/heads/master
| 2022-12-23T04:05:49.875422 | 2020-09-17T19:21:52 | 2020-09-17T19:21:52 | 296,421,313 | 0 | 0 | null | null | null | null | null |
[
{
"alpha_fraction": 0.5315762162208557,
"alphanum_fraction": 0.5870271921157837,
"avg_line_length": 31.38857078552246,
"blob_id": "d8eecf65821593c90409984923ba8e76fc6005dc",
"content_id": "03741d1c190a5928463e19583f591a1536aa76f1",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 6143,
"license_type": "no_license",
"max_line_length": 134,
"num_lines": 175,
"path": "/Simulation of the exponential infection.py",
"repo_name": "galazat/Simulation-of-the-exponential-infection_Python",
"src_encoding": "UTF-8",
"text": "from tkinter import *\r\nimport os\r\nfrom matplotlib.backends.backend_tkagg import (FigureCanvasTkAgg,NavigationToolbar2Tk)\r\nfrom matplotlib.figure import Figure\r\nimport random\r\nimport numpy as np\r\nimport time\r\n\r\n\r\n\r\ndef Move():\r\n global motion, dots, bd1, bd2, bd3, bd4, motion\r\n for num in range(population):\r\n stepX = random.randint(-10, 10)\r\n stepY = random.randint(-10, 10)\r\n if ((dots[num][0] + stepX) < bd2 and (dots[num][0] + stepX) > bd4):\r\n dots[num][0] += stepX\r\n else:\r\n dots[num][0] -= stepX\r\n if ((dots[num][1] + stepY) < bd3 and (dots[num][1] + stepY) > bd1):\r\n dots[num][1] += stepY\r\n else:\r\n dots[num][1] -= stepY\r\n can.coords(Dots[num], dots[num][0], dots[num][1], dots[num][0] + 5, dots[num][1] + 5)\r\n if (True):\r\n root.after(120, Move)\r\n\r\ndef Population():\r\n global population\r\n global dots\r\n dots = np.zeros((population, 2))\r\n for i in range(0, population):\r\n dots[i] = (random.randint(50, winsize[0]-380), random.randint(110, winsize[1]-50))\r\n x1, y1 = dots[i]\r\n Dots.append(can.create_oval(x1, y1, x1+5, y1+5, fill='#aa3333'))\r\n\r\n\r\n'''def find_nearest_vector(array, value): # пример заражения самого близкого индивида\r\n idx = np.array([np.linalg.norm(x+y) for (x,y) in array-value]).argmin()\r\n return array[idx]\r\n\r\ndef Desise():\r\n global illdots, dots, day\r\n #print(illdots)\r\n il = []\r\n for i in illdots:\r\n dots1\r\n a = np.where(dots == find_nearest_vector(dots1, dots[i]))[0]\r\n for j in a:\r\n if (j != i):\r\n il.append(j)\r\n can.itemconfigure(Dots[j], fill='green')\r\n break\r\n for k in il:\r\n illdots.append(k)\r\n day += 1\r\n can.itemconfigure(lday, root, text=\"День: \"+ str(day), ba='#111', fg='#ddd', font=['Akrobat Bold', 22])\r\n if (motion):\r\n root.after(3000, Desise)'''\r\n\r\n\r\ndef Epidemic():\r\n global day, lday, illdots, motion, speed, Dots, noill, ill, population, figure, subp\r\n day = day + 1\r\n speed = 2**(day)\r\n\r\n text = \"День: \" + str(day)\r\n lday['text'] = text\r\n text = \"Скорость распространения: \"+'\\n'+ str(speed) + \" чел/день\"\r\n lspeed['text'] = text\r\n text = \"Здоровые: \"+ str(noill)\r\n lnoill['text'] = text\r\n text = \"Заболевшие: \" + str(ill)\r\n lill['text'] = text\r\n\r\n print(\"speed: \"+str(speed))\r\n for i in range(0, speed):\r\n try:\r\n can.itemconfigure(Dots[i], fill='green')\r\n #can.itemconfigure(Dots[i], fill='#ff5555')\r\n np.delete(illdots, i, axis=0)\r\n if (noill > 0):\r\n noill -=1\r\n if (ill < population):\r\n ill +=1\r\n except:\r\n motion = False\r\n\r\n if (motion):\r\n dayarray.append(day)\r\n illarray.append(ill)\r\n subp.cla()\r\n subp.tick_params(axis='x', colors='white')\r\n subp.tick_params(axis='y', colors='white')\r\n subp.set_xlabel('Количество дней', color='#d0d0d0', size=20)\r\n subp.set_ylabel('Количество заразившихся', color='#d0d0d0', size=20)\r\n subp.plot(dayarray, illarray, color='green', linewidth=3)\r\n cass.draw()\r\n\r\n if (motion):\r\n root.after(3000, Epidemic)\r\n else:\r\n print(\"Популяция полностью заражена!\")\r\n\r\nday = -1\r\ndayarray = []\r\nillarray = []\r\nill = 0\r\npopulation = 1000\r\nnoill = population\r\nspeed = 0\r\nmotion = True\r\nwinsize = [1280, 720]\r\nbd1 = 100\r\nbd2 = winsize[0] -370\r\nbd3 = winsize[1] - 40\r\nbd4 = 40\r\ndots = []\r\nDots = []\r\nzeroP = random.randint(0, 1000)\r\nilldots = []\r\nilldots.append(zeroP)\r\n\r\nwinsize = [1280, 720]\r\nroot = Tk()\r\nroot.title(\"Инфицирование популяции\")\r\nroot.geometry(str(winsize[0])+'x'+str(winsize[1]))\r\nroot.resizable(False, False)\r\nroot[\"bg\"]=\"#fff\"\r\n\r\ncan = Canvas(width=1272, height=714, bg='#111')\r\nlday=Label(root, text=\"День: \"+ str(day+1), ba='#111', fg='#ddd', font=['Akrobat Bold', 22])\r\nlday.place(x=15, y=7)\r\nlpopulation=Label(root,anchor=\"nw\", text=\"Популяция: \"+ str(population), ba='#111', fg='#ddd', font=['Akrobat Bold', 22])\r\nlpopulation.place(x=winsize[0]/3, y=7)\r\nlpopulation2=Label(root, text=\"Популяция: \"+ str(population), ba='#111', fg='#aaa', font=['Akrobat Bold', 14])\r\nlpopulation2.place(x=winsize[0]-300, y=5)\r\nlnoill=Label(root, text=\"Здоровые: \"+ str(noill), ba='#111', fg='#aaa', font=['Akrobat Bold', 14])\r\nlnoill.place(x=winsize[0]-300, y=45)\r\nlill=Label(root, text= \"Заболевшие: \"+ str(ill), ba='#111', fg='#aaa', font=['Akrobat Bold', 14])\r\nlill.place(x=winsize[0]-300, y=85)\r\n\r\nlspeed=Label(root, text=\"Скорость распространения: \"+'\\n'+ str(speed) + \" чел/день\", ba='#111', fg='#aaa', font=['Akrobat Bold', 14])\r\nlspeed.place(x=winsize[0]-300, y=205)\r\nlgraph=Label(root, text=\"График заболевания\", ba='#111', fg='#aaa', font=['Akrobat Bold', 14])\r\nlgraph.place(x=winsize[0]-300, y=355)\r\n\r\nbord1 = can.create_line(20, 80, winsize[0]-350, 80)\r\nbord2 = can.create_line(winsize[0]-350, 80, winsize[0]-350, winsize[1] - 20)\r\nbord3 = can.create_line(winsize[0]-350, winsize[1] - 20, 20, winsize[1] - 20)\r\nbord4 = can.create_line(20, winsize[1] - 20, 20, 80)\r\ncan.itemconfigure(bord1, fill='#57a', width = 3)\r\ncan.itemconfigure(bord2, fill='#57a', width = 3)\r\ncan.itemconfigure(bord3, fill='#57a', width = 3)\r\ncan.itemconfigure(bord4, fill='#57a', width = 3)\r\n\r\nfigure= Figure(figsize=(6,6), dpi=50)\r\nsubp = figure.add_subplot(111)\r\nfigure.set_facecolor('#101010')\r\nsubp.set(facecolor='#ffffff')\r\nsubp.set_xlabel('Количество дней', color='#d0d0d0', size=20)\r\nsubp.set_ylabel('Количество заразившихся', color='#d0d0d0', size= 20)\r\ncass = FigureCanvasTkAgg(figure, master = root)\r\ncass.draw()\r\ncass.get_tk_widget().place(x=winsize[0]-320, y=395)\r\ncan.place(x=2, y=0)\r\n\r\n\r\nPopulation()\r\nMove()\r\nilldots = dots\r\nEpidemic()\r\n\r\n\r\nroot.mainloop()\r\n"
},
{
"alpha_fraction": 0.8541666865348816,
"alphanum_fraction": 0.8541666865348816,
"avg_line_length": 48,
"blob_id": "3c31ce3080c7fc1f904e67a51cf0c13352b86401",
"content_id": "92749e99438dfc93bf361847c9f70543622cd1bb",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Markdown",
"length_bytes": 48,
"license_type": "no_license",
"max_line_length": 48,
"num_lines": 1,
"path": "/README.md",
"repo_name": "galazat/Simulation-of-the-exponential-infection_Python",
"src_encoding": "UTF-8",
"text": "# Simulation-of-the-exponential-infection_Python"
}
] | 2 |
NaziaShramin/Lab5A
|
https://github.com/NaziaShramin/Lab5A
|
d4f4bccaa257b277be8e4571f8ed7f86f70afc37
|
b4ff47c6ad0782f4cee8d8ead940bb04dd32f313
|
7c3ece398c8b429e8b0827951b3d6d40afde24a0
|
refs/heads/master
| 2020-04-08T14:43:11.123588 | 2018-11-28T05:56:08 | 2018-11-28T05:56:08 | 159,448,823 | 0 | 0 | null | null | null | null | null |
[
{
"alpha_fraction": 0.5456394553184509,
"alphanum_fraction": 0.5530129671096802,
"avg_line_length": 34.08256912231445,
"blob_id": "c8779b91e879a226e8630762946dc79709f8c44c",
"content_id": "14beb15193acc6dc2ce79267c5479ff584201c3f",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 3933,
"license_type": "no_license",
"max_line_length": 120,
"num_lines": 109,
"path": "/lab5A.py",
"repo_name": "NaziaShramin/Lab5A",
"src_encoding": "UTF-8",
"text": "# Course: Data Structure , Author:Nazia Sharmin, assignment:Lab5A,instructor:Professor-Diego Aguirre,\r\n# T.A.:Anindita Nath, date of creation 10/27/2018: Purpose of program: Insert a file in the minheap configuration and sort\r\n# the list in minheap and extracting mean\r\nclass MinHeap:\r\n\r\n def __init__(self):\r\n self.heap_array = []\r\n\r\n def insert(self, k):\r\n # add the new value to the end of the array.\r\n\r\n self.heap_array.append(k)\r\n\r\n # percolate up from the last index to restore heap property.\r\n self.percolate_up(len(self.heap_array) - 1)\r\n\r\n def percolate_up(self, node_index):\r\n while node_index > 0:\r\n # compute the parent node's index\r\n parent_index = (node_index - 1) // 2\r\n\r\n # check for a violation of the max heap property\r\n if self.heap_array[node_index] >= self.heap_array[parent_index]:\r\n # no violation, so percolate up is done.\r\n return\r\n else:\r\n # swap heap_array[node_index] and heap_array[parent_index]\r\n print(\"Swapping values\",(self.heap_array[parent_index], self.heap_array[node_index]))\r\n temp = self.heap_array[node_index]\r\n self.heap_array[node_index] = self.heap_array[parent_index]\r\n self.heap_array[parent_index] = temp\r\n\r\n # continue the loop from the parent node\r\n node_index = parent_index\r\n\r\n def percolate_down(self, node_index):\r\n child_index = 2 * node_index + 1\r\n value = self.heap_array[node_index]\r\n\r\n while child_index < len(self.heap_array):\r\n # Find the max among the node and all the node's children\r\n max_value = value\r\n max_index = -1\r\n i = 0\r\n while i < 2 and i + child_index < len(self.heap_array):\r\n if self.heap_array[i + child_index] < max_value:\r\n max_value = self.heap_array[i + child_index]\r\n max_index = i + child_index\r\n i = i + 1\r\n\r\n # check for a violation of the max heap property\r\n if max_value == value:\r\n return\r\n else:\r\n # swap heap_array[node_index] and heap_array[max_index]\r\n print(\" percolate_down() swap: %d <-> %d\" % (self.heap_array[node_index], self.heap_array[max_index]))\r\n temp = self.heap_array[node_index]\r\n self.heap_array[node_index] = self.heap_array[max_index]\r\n self.heap_array[max_index] = temp\r\n\r\n # continue loop from the max index node\r\n node_index = max_index\r\n child_index = 2 * node_index + 1\r\n\r\n\r\n\r\n\r\n def extract_min(self):\r\n # save the max value from the root of the heap.\r\n print(\"extract_min():\")\r\n min_value = self.heap_array[0]\r\n\r\n # move the last item in the array into index 0.\r\n replace_value = self.heap_array.pop()\r\n if len(self.heap_array) > 0:\r\n self.heap_array[0] = replace_value\r\n\r\n # percolate down to restore max heap property.\r\n self.percolate_down(0)\r\n\r\n # return the max value\r\n return min_value\r\n\r\n def is_empty(self):\r\n return len(self.heap_array) == 0\r\n\r\n def read_file(self, file_name):\r\n number_list=[]\r\n file = open(file_name, \"r\")\r\n for line in file:\r\n number_list.append([int(n) for n in line.strip().split(',')])\r\n # print(number_list)\r\n return number_list[0]\r\n\r\n # Sorts the list of numbers using the heap sort algorithm\r\n\r\nh = MinHeap()\r\n\r\n\r\ninput_list = h.read_file(\"heapfile.txt\")\r\nprint(\"Original list\", input_list)\r\n\r\nfor item in input_list:\r\n h.insert(item)\r\nprint(\"Sorted heap\", h.heap_array)\r\nwhile len(h.heap_array) > 0:\r\n removed_value = h.extract_min()\r\n print((removed_value, h.heap_array))\r\nprint()\r\n"
}
] | 1 |
jiajuntan/nba-scrapper
|
https://github.com/jiajuntan/nba-scrapper
|
2def73e219f4485823bde42f6abd395d38a532da
|
320809300747ea21e286d2eab3e1410ee330e384
|
6b42335503db79233e3c55535f6233f6dafe33cd
|
refs/heads/master
| 2020-03-10T00:23:48.731685 | 2018-04-26T06:27:45 | 2018-04-26T06:27:45 | 129,080,633 | 0 | 0 | null | null | null | null | null |
[
{
"alpha_fraction": 0.7349081635475159,
"alphanum_fraction": 0.7532808184623718,
"avg_line_length": 26.214284896850586,
"blob_id": "7ac1cf724b8d46d492f17ca15f2c78c757f047c3",
"content_id": "2e983ec8be01b7c601665acb063ee6bcb4b49a60",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Markdown",
"length_bytes": 381,
"license_type": "no_license",
"max_line_length": 90,
"num_lines": 14,
"path": "/README.md",
"repo_name": "jiajuntan/nba-scrapper",
"src_encoding": "UTF-8",
"text": "# Official NBA Statistics Scraper\n\nScrapers for [official site](https://stats.nba.com/) of NBA statistics\n\n**Libraries**:\n1. Pandas\n2. NumPy\n2. Selenium\n3. BeautifulSoup\n4. Regex\n\n**Current Scrappers**:\n1. [NBA Teams Advanced Box Scores](https://stats.nba.com/teams/boxscores-traditional/)\n2. [NBA Players Advanced Box Scores](https://stats.nba.com/players/boxscores-traditional/)\n"
},
{
"alpha_fraction": 0.6689038276672363,
"alphanum_fraction": 0.6809843182563782,
"avg_line_length": 29.20270347595215,
"blob_id": "da129f1b8475a04d2c1eb524039a4960513dfcab",
"content_id": "0404d6132c7e9a30e89589e24149db96f14e9e86",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 2235,
"license_type": "no_license",
"max_line_length": 214,
"num_lines": 74,
"path": "/NBA Players Advanced Box Scores Scraper.py",
"repo_name": "jiajuntan/nba-scrapper",
"src_encoding": "UTF-8",
"text": "# Importing libraries\nfrom selenium import webdriver\nfrom bs4 import BeautifulSoup as bsoup\nimport pandas as pd\nimport numpy as np\nimport re\nfrom collections import OrderedDict\n\n# Establishing seasons to scrap\nstart_year = str(2008)\nend_year = str(int(start_year)+1)[2:].zfill(2)\nseason = \"{}-{}\".format(start_year,end_year)\nseason\n\n# Selenium automated browser\nweblink = 'http://stats.nba.com/players/boxscores/?Season={}&SeasonType=Regular%20Season'.format(season)\ndriver = webdriver.Chrome(executable_path=\"/Users/JiaJun/Downloads/chromedriver\") \ndriver.get(weblink)\n\n# Main function \ndef func(rows):\n file2 = []\n for row in rows:\n file2.append(row.get_text().strip().split(\"\\n\"))\n file2 = np.transpose(file2[1:])\n i = 0\n for key in dic.keys():\n dic[key].extend(list(file2[i]))\n i += 1\n return dic\n\n\n# Parsing using BeautifulSoup\nbs_obj = bsoup(driver.page_source, 'html.parser')\nrows = bs_obj.find_all('table')[0].find_all('tr')\n\n\n# Dictionary to store data & keys to include\nfile = []\nfor row in rows:\n file.append(row.get_text().strip().split(\"\\n\"))\n\ndic = OrderedDict()\nfor i in file[0]:\n if i == 'Match\\xa0Up':\n dic['Match'] = []\n elif i == 'Game\\xa0Date':\n dic['Date'] = []\n else:\n dic[i] = []\n\n# Total pages (tables) to scroll through \ntotal_pages = driver.find_element_by_xpath(r\"//div[@class='stats-table-pagination__inner stats-table-pagination__inner--top']\")\ntotal_pages = int(re.findall(r\"of (\\d+)\",total_pages.text)[0])\n\n\n# Automating scrolling of tables, and cleaning data to store in dictionary\nbutton = driver.find_element_by_xpath(r\"//div[@class=\\'stats-table-pagination__inner stats-table-pagination__inner--top\\']//div[@class=\\'stats-table-pagination__info\\']//a[@class=\\'stats-table-pagination__next\\']\")\n\npage = 1 \nwhile page <= total_pages:\n\tbs_obj = bsoup(driver.page_source, 'html.parser')\n\trows = bs_obj.find_all('table')[0].find_all('tr')\n\tdic = func(rows)\n\tbutton.click()\n\tpage += 1\n\n# Converting to dataframe and exporting as CSV \ndf = pd.DataFrame(dic)\ndf['season'] = [season]*df.shape[0]\n\nfiledestination = r\"/Users/JiaJun/Desktop/nba-players_{}.csv\".format(season) # change filepath here \n\ndf.to_csv(filedestination,index=None)\n"
},
{
"alpha_fraction": 0.670437753200531,
"alphanum_fraction": 0.6837186217308044,
"avg_line_length": 33.45762634277344,
"blob_id": "893b2740318ca479e86e9a337b8dbb244dc42765",
"content_id": "fe8f8210a38e25cbf1bb5571def44993d7d5b972",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 2033,
"license_type": "no_license",
"max_line_length": 208,
"num_lines": 59,
"path": "/NBA Team Advanced Box Scores Scraper.py",
"repo_name": "jiajuntan/nba-scrapper",
"src_encoding": "UTF-8",
"text": "# Importing required libraries\nfrom selenium import webdriver\nfrom bs4 import BeautifulSoup as bsoup\nfrom collections import OrderedDict\nimport pandas as pd\nimport numpy as np\n\n# Defining season of interest to scrap\nstart_year = str(2016)\nend_year = str(int(start_year)+1)[2:].zfill(2)\nseason = \"{}-{}\".format(start_year,end_year)\n\n# Selenium's WebDriver for webpage automation and BeautifulSoup as parser\ndriver = webdriver.Chrome(executable_path=\"/Users/JiaJun/Downloads/chromedriver\")\nfilepath = \"https://stats.nba.com/teams/boxscores-traditional/?Season={}&SeasonType=Regular%20Season\".format(season)\ndriver.get(filepath)\nbs_obj = bsoup(driver.page_source, 'html.parser')\nrows = bs_obj.find_all('table')[0].find_all('tr')\n\n# OrderedDictionary to store scrapped data and defining keys as DataFrame columns\nfile = []\nfor row in rows:\n file.append(row.get_text().strip().split(\"\\n\"))\n\ndic = OrderedDict()\nfor i in file[0]:\n if i == 'Match\\xa0Up':\n dic['Match'] = []\n elif i == 'Game\\xa0Date':\n dic['Date'] = []\n else:\n dic[i] = []\n \n# Main function in loop to process and store data\ndef func(rows):\n file2 = []\n for row in rows:\n file2.append(row.get_text().strip().split(\"\\n\"))\n file2 = np.transpose(file2[1:])\n i = 0\n for key in dic.keys():\n dic[key].extend(list(file2[i]))\n i += 1\n return dic\n\n# Automation through WebDriver and looping through all pages of data \nbutton = driver.find_element_by_xpath(r\"//div[@class='stats-table-pagination__inner stats-table-pagination__inner--top']//div[@class='stats-table-pagination__info']//a[@class='stats-table-pagination__next']\")\npage = 1\nwhile page <= 50:\n bs_obj = bsoup(driver.page_source, 'html.parser')\n rows = bs_obj.find_all('table')[0].find_all('tr')\n dic = func(rows)\n button.click()\n page += 1\n\n# Transforming into DataFrame and exporting as csv\ndf = pd.DataFrame(dic)\nfiledestination = r\"/Users/JiaJun/Desktop/nba-teams_{}.csv\".format(season)\ndf.to_csv(filedestination,index=None)\n"
}
] | 3 |
izaazk/tkinterPOS
|
https://github.com/izaazk/tkinterPOS
|
cfe4c875ca5516df292081abe799252e9a31995d
|
ffa3719b68c573f5292326abb73b556c0615eda7
|
3a871d335a3a743c1fa07674ab16657fabfae1b4
|
refs/heads/master
| 2020-03-15T19:18:44.472920 | 2018-05-06T04:00:03 | 2018-05-06T04:00:03 | 132,305,320 | 1 | 0 | null | null | null | null | null |
[
{
"alpha_fraction": 0.5958333611488342,
"alphanum_fraction": 0.6220238208770752,
"avg_line_length": 19.962499618530273,
"blob_id": "54002570aa0e7801ffa18ab3ff6c96b9615cafd0",
"content_id": "efe382519916f58f46db6aeb4425a0cf69e38143",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 1680,
"license_type": "no_license",
"max_line_length": 95,
"num_lines": 80,
"path": "/adminChoice.py",
"repo_name": "izaazk/tkinterPOS",
"src_encoding": "UTF-8",
"text": "'''\n@author: Izaaz Kothawala\n@date: 03/26/2018\n@class: ITMD 413\n@Lab: 08\n\n\n\n'''\n\nimport datetime\nimport locale\nfrom tkinter import *\nfrom tkinter import messagebox\nfrom tkinter.messagebox import showinfo, showwarning, showerror\n \n\ndef goToPOS():\n root.destroy()\n import main\n main.root\n \ndef goToAdmin():\n root.destroy()\n import AdminPanel\n AdminPanel.root\n\n\ndef buildFrame():\n \n root = Tk()\n root.geometry(\"800x600\")\n \n root.resizable(False, False)\n root.title(\"POS - Administration\")\n \n bg_img = PhotoImage(file=\"./img/regbg.png\")\n\n lbl1 = Label(root, image=bg_img)\n lbl1.image = bg_img\n lbl1.place(x=0, y=0, relwidth=1, relheight=1) \n\n \n img = PhotoImage(file=\"./img/logo.png\")\n\n \n lbl = Label(root, image=img)\n lbl.image = img\n lbl.pack(fill = X, pady=10);\n \n lbl = Label(root, text = \"Administrator, Choose an Option Below: \", font=(\"Helvetica\", 26))\n lbl.pack(fill = X, pady = 10);\n frame2 = Frame(root)\n frame2.pack(fill = Y, expand=FALSE, padx=20, pady=10)\n\n frame3 = Frame(root)\n frame3.pack(side = TOP, expand=TRUE)\n \n \n pos_image = PhotoImage(file=\"./img/posLogo.png\")\n pos_button = Button(frame3, image=pos_image, command = goToPOS)\n pos_button.grid();\n pos_button.image = pos_image \n \n frame4 = Frame(root)\n frame4.pack(side = BOTTOM, expand=TRUE)\n #inventory button\n usermgt_image = PhotoImage(file=\"./img/userAdmin.png\")\n inventoryL = Button(frame4, image=usermgt_image, command = goToAdmin)\n inventoryL.grid();\n inventoryL.image = usermgt_image \n \n \n \n \n return root\n\n\nroot = buildFrame()\nroot.mainloop()\n\n\n\n"
},
{
"alpha_fraction": 0.5557109713554382,
"alphanum_fraction": 0.58065265417099,
"avg_line_length": 28.79861068725586,
"blob_id": "010a7b530bb342ceae280fc816bc9b8d0346a5be",
"content_id": "66d7019581a531d42c50d1d9db9b5f9cffd72e31",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 4290,
"license_type": "no_license",
"max_line_length": 133,
"num_lines": 144,
"path": "/editUser.py",
"repo_name": "izaazk/tkinterPOS",
"src_encoding": "UTF-8",
"text": "import datetime\nimport locale\nfrom tkinter import *\nfrom tkinter import messagebox\nfrom tkinter.messagebox import showinfo, showwarning, showerror\nimport uuid\nimport hashlib\nimport sqlite3\nfrom sqlite3 import Error\nfrom sqlite3 import Cursor\nimport sys\n\n\ndef buildFrame () :\n \n global name_input, pass_input, ssn_input, tkvar\n \n root = Tk()\n root.geometry(\"800x600\")\n \n frame1 = Frame(root)\n frame1.pack()\n \n root.resizable(False, False)\n root.title(\"POS - Edit Employee\")\n \n bg_img = PhotoImage(file=\"./img/regbg.png\")\n\n lbl1 = Label(root, image=bg_img)\n lbl1.image = bg_img\n lbl1.place(x=0, y=0, relwidth=1, relheight=1) \n\n \n img = PhotoImage(file=\"./img/logo.png\")\n\n \n lbl = Label(root, image=img)\n lbl.image = img\n lbl.pack(fill = X, pady=10);\n \n lbl = Label(root, text = \"Please Edit the details below: \", font=(\"Helvetica\", 26))\n lbl.pack(fill = X, pady = 10);\n frame2 = Frame(root)\n frame2.pack(fill = Y, expand=FALSE, padx=20, pady=10)\n \n #username label\n user_search = Label(frame2, text=\"Search Name:\", font=(\"Helvetica\", 24))\n fullname_label.grid(row=1, column=0, sticky=W, padx = 20)\n \n #username label\n fullname_label = Label(frame2, text=\"Full Name:\", font=(\"Helvetica\", 24))\n fullname_label.grid(row=1, column=0, sticky=W, padx = 20)\n \n #password label\n password_label = Label(frame2, text=\"Password:\", font=(\"Helvetica\", 24))\n password_label.grid(row=2, column=0, sticky=W, padx = 20)\n \n rights_label = Label(frame2, text=\"Employee Type:\", font=(\"Helvetica\", 24))\n rights_label.grid(row=3, column=0, sticky=W, padx = 20)\n \n #input for username\n name_input = StringVar()\n \n fullname_entry = Entry(frame2, textvariable=name_input, font=\"Helvetica 20\")\n fullname_entry.grid(row=1, column=1, sticky=W, padx = 10, ipadx=70, ipady=10)\n \n #input for password\n pass_input = StringVar()\n \n password_entry = Entry(frame2, textvariable=pass_input, show=\"*\", font=\"Helvetica 20\")\n password_entry.grid(row=2, column=1, sticky=W, padx = 10, pady = 10,ipadx=70, ipady=10)\n\n# Create a Tkinter variable\n tkvar = StringVar(root)\n \n# Dictionary with options\n choices = { 'Cashier','Manager'}\n tkvar.set('Cashier') # set the default option\n \n popupMenu = OptionMenu(frame2, tkvar, *choices)\n popupMenu.grid(row = 3, column = 1, padx = 10, pady = 10, ipadx=70, ipady = 10,sticky = N+S+E+W)\n popupMenu.configure(font=('Helvetica', 20))\n# on change dropdown value\n\n \n# link function to change dropdown\n #submit button\n login_image = PhotoImage(file=\"./img/register_submit.png\")\n submit_button = Button(frame2, image=login_image, command = edit_record)\n submit_button.grid(column = 1, pady = 10);\n submit_button.image = login_image\n \n \n return root\n\n \ndef edit_record():\n rights = 0\n name = name_input.get()\n pw = pass_input.get()\n if (tkvar.get() == \"Cashier\"):\n rights = 1\n if (tkvar.get() == \"Manager\"):\n rights = 2\n submission = hashlib.sha1(pw.encode('utf-8')).hexdigest()\n \n if (name == \"\") or (pw == \"\"):\n showerror(\"ERROR\", \"You cannot have a blank name or pin!\")\n root.destroy()\n else:\n \n try:\n cont = sqlite3.connect('./database/users.db')\n with cont:\n \n c = cont.cursor()\n c.execute('''INSERT INTO employee (emp_name, emp_pass, rights)\n VALUES(?,?, ?)''', (name,submission, rights))\n \n c.execute(\"\"\"UPDATE employee SET emp_name = ? ,emp_pass = ?,rights = ? WHERE employee_id= ? \"\"\",\n (name,submission,rights,emp_id))\n cont.commit()\n \n showinfo(\"New Employee Added\",\"Added successfully! \\n\\nEmployee Name: \" + name + \"\\nEmployee ID:\" + str(c.lastrowid))\n root.destroy()\n success = 1\n print (success)\n if (success == 1):\n import adminChoice\n adminChoice.root\n \n cont.close()\n \n \n \n except Error as e:\n print(e)\n\n\n \n \n \nroot = buildFrame()\nroot.mainloop()"
},
{
"alpha_fraction": 0.5856000185012817,
"alphanum_fraction": 0.6287999749183655,
"avg_line_length": 23.223684310913086,
"blob_id": "008a0ac3c4b91a6209a910f16425748e0fb0e072",
"content_id": "5d3747ef111b92f36343ce1b8851707f9d071c11",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 1875,
"license_type": "no_license",
"max_line_length": 78,
"num_lines": 76,
"path": "/AdminPanel.py",
"repo_name": "izaazk/tkinterPOS",
"src_encoding": "UTF-8",
"text": "'''\n@author: Izaaz Kothawala\n@date: 03/26/2018\n@class: ITMD 413\n@Lab: 08\n\n\n\n'''\n\nimport datetime\nimport locale\nfrom tkinter import *\nfrom tkinter import messagebox\nfrom tkinter.messagebox import showinfo, showwarning, showerror\n\ndef buildFrame () :\n \n global name_input, pass_input, ssn_input, tkvar\n \n root = Tk()\n root.geometry(\"1000x600\")\n \n root.resizable(False, False)\n root.title(\"POS - Administration\")\n \n #root.protocol(\"WM_DELETE_WINDOW\", disable_event)\n \n # create all of the main containers\n top_frame = Frame(root, width=400, height=40, pady=10, padx=10)\n center = Frame(root, width=400, height=200, padx=3, pady=3)\n btm_frame = Frame(root, width=400, height=80, pady=3)\n btm_frame2 = Frame(root, width=400, height=40, pady=3)\n btm_frame3 = Frame(root, width=400, height=45, pady=3)\n\n \n \n # layout all of the main containers\n root.grid_rowconfigure(1, weight=1)\n root.grid_columnconfigure(0, weight=1)\n\n top_frame.grid(row=0, sticky = \"\")\n center.grid(row=1, sticky=\"\")\n btm_frame.grid(row=3, sticky=\"e\", padx = 320)\n btm_frame2.grid(row=4, sticky=\"\")\n btm_frame3.grid(row=5, sticky=\"ew\")\n \n logo = PhotoImage(file=\"./img/logo.png\")\n lbl = Label(top_frame, image=logo)\n lbl.image = logo\n lbl.grid(padx = 130, sticky = 'nsew');\n \n usermgt_label = Label(center, text='User Management Functions:')\n usermgt_label.grid(sticky = 'n', row = 0, ipadx = 40, ipady=10,pady = 5) \n \n addUserBtn_image = PhotoImage(file=\"./img/user/addUser.png\")\n addUserBtn = Button(center, image=addUserBtn_image, command = addUser)\n addUserBtn.grid(row = 1);\n addUserBtn.image = addUserBtn_image\n \n \n return root\n\n\n\ndef addUser():\n root.destroy()\n import addEmp\n addEmp.root\n \n\n\n\n\nroot = buildFrame()\nroot.mainloop()\n\n\n\n\n \n \n \n \n \n \n"
},
{
"alpha_fraction": 0.5193902254104614,
"alphanum_fraction": 0.5392451882362366,
"avg_line_length": 30.824851989746094,
"blob_id": "0586e17c87947461b44925abadeddd765523c2cf",
"content_id": "b058f056a98572894f487468ee4257dbfc7fe197",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 26895,
"license_type": "no_license",
"max_line_length": 144,
"num_lines": 845,
"path": "/main.py",
"repo_name": "izaazk/tkinterPOS",
"src_encoding": "UTF-8",
"text": "'''\n@author: Izaaz Kothawala\n@date: 03/26/2018\n@class: ITMD 413\n@Lab: 08\n\n\n\n'''\n\nimport datetime\nimport locale\nfrom tkinter import *\nfrom tkinter import messagebox\nfrom tkinter.messagebox import showinfo, showwarning, showerror\nimport sqlite3\nfrom sqlite3 import Error\nfrom sqlite3 import Cursor\nfrom tkinter import simpledialog\nimport os\nimport hashlib\n\n\n\ndef buildFrame () :\n \n global tree, inputSKU, skuEntry, subAmount, taxAmount,totalAmount, trans_counter, rights1\n global userID, rights, emp_name, pcButton, qtyButton, lineVoid, lineVoid, pcOverride,transVoid, btm_frame2, total_button, goButton\n global timestamp\n locale.setlocale( locale.LC_ALL, '' )\n timestamp = datetime.datetime.now()\n\n global popup, user_id, user_pw\n \n initial=0\n root = Tk()\n root.geometry(\"1000x800\")\n \n root.resizable(False, False)\n root.title(\"POS - Main Screen\")\n \n #root.protocol(\"WM_DELETE_WINDOW\", disable_event)\n \n root.state('zoomed')\n \n global rights, employeeID\n subAmount = StringVar()\n taxAmount = StringVar()\n totalAmount = StringVar()\n trans_counter = StringVar()\n emp_name = StringVar() \n \n try:\n temp = open('./database/active_user','r').read().split('\\n')\n \n \n employeeID = temp[0]\n name = temp[1]\n rights = temp[2]\n \n \n os.remove(\"./database/active_user\")\n except FileNotFoundError:\n root.destroy()\n import Login_screenn\n Login_screenn.root\n #userID.set(employeeID)\n \n rights1 = int(rights)\n \n if (rights1 == 1):\n position = \"Cashier\"\n if (rights1 == 2):\n position = \"Manager\"\n\n emp_name.set(name.title() + \" - \" + position)\n \n if (initial is 0):\n subAmount.set(0)\n taxAmount.set(0)\n totalAmount.set(0)\n trans_counter.set(\"0\")\n initial = initial + 1\n \n # create all of the main containers\n top_frame = Frame(root, width=400, height=40, pady=10, padx=10)\n center = Frame(root, width=400, height=200, padx=3, pady=3)\n btm_frame = Frame(root, width=400, height=80, pady=3)\n btm_frame2 = Frame(root, width=400, height=40, pady=3)\n btm_frame3 = Frame(root, width=400, height=45, pady=3)\n\n \n \n # layout all of the main containers\n root.grid_rowconfigure(1, weight=1)\n root.grid_columnconfigure(0, weight=1)\n\n top_frame.grid(row=0, sticky = \"\")\n center.grid(row=1, sticky=\"\")\n btm_frame.grid(row=3, sticky=\"e\", padx = 320)\n btm_frame2.grid(row=4, sticky=\"\")\n btm_frame3.grid(row=5, sticky=\"ew\")\n \n #the actual widgets on the top part of screen\n \n logo = PhotoImage(file=\"./img/logo.png\")\n lbl = Label(top_frame, image=logo)\n lbl.image = logo\n lbl.grid(padx = 130, sticky = 'nsew');\n \n # layout the widgets in the top frame\n #company_label.grid(row=1, column = 0, sticky=N+S+E+W)\n #item sku entry:\n \n #skuLabel = Label(center, text='Click Here to Enter a SKU: ')\n #skuLabel.grid(row = 1,sticky = 'w', ipadx = 60)\n \n inputSKU = StringVar()\n \n skuEntry = Entry(center, textvariable = inputSKU, width = 100)\n skuEntry.grid(row = 1, ipadx = 100, ipady=20, sticky='w') \n skuEntry.insert(0, \"Click Here to Enter a SKU\") \n skuEntry.bind(\"<Button-1>\", clear_search) \n \n goButton = Button(center,text=\"Enter\",bg=\"white\", height = 1, width = 5, command = fetchData)\n goButton.grid(row = 1, ipadx = 40, ipady = 20, column = 2);\n\n \n\n \n #actual item loader\n from tkinter import ttk \n tree = ttk.Treeview(center,show=\"headings\", height = 20)\n\n tree[\"columns\"]=(\"one\",\"two\",\"three\",\"four\")\n tree.column(\"one\", width=125 )\n tree.column(\"two\", width=600 )\n tree.column(\"three\", width=150)\n tree.column(\"four\", width=100)\n tree.heading(\"one\",text=\"SKU\")\n tree.heading(\"two\", text=\"Description\")\n tree.heading(\"three\", text=\"Price\")\n tree.heading(\"four\", text=\"Quantity\")\n \n vsb = ttk.Scrollbar(orient=\"vertical\", command=tree.yview)\n vsb.grid(column=5, row=2, sticky='ns', in_=center)\n tree.configure(yscrollcommand=vsb.set)\n #tree.insert(\"\" , 0, values=(\"91919\",\"Monster Energy Drink 12oZ\",\"2.99\", \"1\"))\n\n tree.grid(sticky = E+W+N+S, row = 2,columnspan = 4)\n tree.bind('<Button-1>', handle_click)\n \n subTotal_label = Label(btm_frame, text='Active Customer: ')\n subTotal_label.grid(sticky = 'e', row = 0, ipadx = 40, ipady=10, pady = 5)\n \n \n #total rows\n subTotal_label = Label(btm_frame, text='Subtotal: $')\n subTotal_label.grid(sticky = 'w', row = 0, ipadx = 40, ipady=10, pady = 5)\n subTotal_field = Entry(btm_frame, textvariable=subAmount, justify = LEFT,state = 'readonly')\n subTotal_field.grid( row = 0, column = 2,ipadx = 20, ipady=5, pady = 5)\n \n tax_label = Label(btm_frame, text='Tax: $')\n tax_label.grid(sticky = 'w', row = 1, ipadx = 40, ipady=10)\n tax_field = Entry(btm_frame, textvariable=taxAmount, justify = LEFT,state = 'disabled')\n tax_field.grid( row = 1, column = 2,ipadx = 20, ipady=5)\n \n total_label = Label(btm_frame, text='Total Due: $', bg=\"yellow\")\n total_label.grid(sticky = 'w', row = 2, ipadx = 40, ipady=10,pady = 5)\n total_field = Entry(btm_frame, textvariable=totalAmount, justify = LEFT, readonlybackground=\"yellow\",state = 'readonly')\n total_field.grid( row = 2, column = 2,ipadx = 20, ipady=10, pady = 5)\n\n \n \n \n \n #register function buttons\n pcButton = Button(btm_frame2,text=\"Price Check\",bg=\"cyan\", height = 1, width = 5, command = priceCheck)\n pcButton.grid(row = 1, ipadx = 40, ipady = 20, padx = 17);\n \n qtyButton = Button(btm_frame2,text=\"Quantity\",bg=\"magenta\", height = 1, width = 5, command = qtyModify)\n qtyButton.grid(row = 1, column = 2,ipadx = 40, ipady = 20, padx = 17);\n \n lineVoid = Button(btm_frame2,text=\"Line Void\",bg=\"red\", height = 1, width = 5, command = lnVoid)\n lineVoid.grid(row = 1, column = 3,ipadx = 40, ipady = 20, padx = 17);\n \n \n pcOverride = Button(btm_frame2,text=\"Price Override\",bg=\"yellow\", height = 1, width = 5, command = popupmsg, state='normal')\n pcOverride.grid(row = 1, column = 4,ipadx = 40, ipady = 20, padx = 17);\n \n\n\n transVoid = Button(btm_frame2,text=\"Trans Void\",bg=\"white\", height = 1, width = 5, command = delButton)\n transVoid.grid(row = 1, column = 6,ipadx = 40, ipady = 20, padx = 17);\n \n #totalButton = Button(btm_frame2,text=\"Tender\",bg=\"green\", height = 1, width = 5, command = tenderFunc)\n #totalButton.grid(row = 1, column = 6,ipadx = 40, ipady = 20, padx = 30);\n\n total_image = PhotoImage(file=\"./img/tenders/mainbtn.png\")\n total_button = Button(btm_frame2, image=total_image, command = tenderFunc, state='disabled')\n total_button.grid(row = 1, column = 8, padx = 70);\n total_button.image = total_image\n \n #bottom of program\n \n user_label = Label(btm_frame3, text='Current User: ')\n\n active_user = Entry(btm_frame3, textvariable=emp_name, state = 'readonly')\n user_label.grid(sticky = 'w', row = 0, ipadx = 0)\n active_user.grid(sticky = 'n', row = 0, ipadx = 40, column = 1, padx = 0)\n \n spacer = Label(btm_frame3, text='')\n spacer.grid(row = 0, column = 4 ,padx = 200, ipadx = 0)\n \n trans_qty = Label(btm_frame3, text='Total Number of Items: ')\n trans_qty.grid(row = 0, column = 5 ,padx = 0, ipadx = 0)\n \n \n transcounter = Entry(btm_frame3, textvariable=trans_counter, state = 'readonly')\n transcounter.grid(sticky = 'n', row = 0, ipadx = 0, column = 6, padx = 0)\n return root\n\ndef delButton():\n \n if messagebox.askokcancel(\"Void Transaction\", \"Are you SURE you want to void the entire transaction?\"):\n \n \n global subAmount\n orig_subtotal = subAmount.get()\n conv_subtotal = float (orig_subtotal)\n conv_subtotal = 0\n \n sub_update = str(\"%.2f\" % conv_subtotal)\n subAmount.set(sub_update)\n \n \n \n global taxAmount\n orig_tax = taxAmount.get()\n conv_tax = float (orig_tax)\n conv_tax = 0\n \n tax_update = str(\"%.2f\" % conv_tax)\n \n taxAmount.set(tax_update)\n \n \n \n global totalAmount\n \n orig_total = totalAmount.get()\n orig_total = float(orig_total)\n \n update = 0\n final_update = str(\"%.2f\" % update)\n \n totalAmount.set(final_update)\n \n \n trans_counter.set(str(0))\n \n #clear the cartlist\n x = tree.get_children()\n \n for item in x:\n tree.delete(item)\n \n if not tree.get_children():\n total_button.config(state = 'disabled')\n \n\n \n \ndef disable_event():\n pass\n\ndef qtyModify():\n \n \n sell_price = tree.item(tree.selection())['values'][2]\n orig_qty = tree.item(tree.selection())['values'][3]\n \n orig_qty = int(orig_qty)\n sell_price = float(sell_price)\n \n answer = simpledialog.askinteger(\"Quantity Edit\", \"Enter Quantity:\",\n parent=root)\n \n if (answer == \"\"):\n showerror(\"NO ENTRY\", \"You did not enter anything...?\")\n \n if answer is not None and answer is not \"\":\n \n tree.set(tree.selection(), 3, answer)\n \n original_sell = orig_qty * sell_price\n \n sel_price2 = sell_price * answer\n \n global subAmount\n orig_subtotal = subAmount.get()\n conv_subtotal = float (orig_subtotal)\n \n conv_subtotal = conv_subtotal - original_sell\n conv_subtotal = conv_subtotal + sel_price2\n \n sub_update = str(\"%.2f\" % conv_subtotal)\n \n subAmount.set(sub_update)\n \n sales_taxes = conv_subtotal * 0.0925\n \n global taxAmount\n \n taxAmount.set(\"0\")\n \n tax_update = str(\"%.2f\" % sales_taxes)\n \n taxAmount.set(tax_update)\n \n \n \n global totalAmount\n \n orig_total = totalAmount.get()\n orig_total = float(orig_total)\n \n final_update = str(\"%.2f\" % (conv_subtotal + sales_taxes))\n \n totalAmount.set(final_update)\n \n global trans_counter\n origqty = int( trans_counter.get())\n origqty = origqty + answer\n\ndef tenderFunc():\n \n global cash_button, card_button, escape_button\n \n pcButton.grid_forget()\n lineVoid.grid_forget()\n transVoid.grid_forget()\n qtyButton.grid_forget()\n pcOverride.grid_forget()\n total_button.grid_forget()\n\n skuEntry.grid_forget()\n goButton.grid_forget()\n \n \n \n cash_image = PhotoImage(file=\"./img/tenders/cash.png\")\n cash_button = Button(btm_frame2, image=cash_image, command = cashPMT)\n cash_button.grid(row = 1, padx = 30);\n cash_button.image = cash_image\n \n card_image = PhotoImage(file=\"./img/tenders/card.png\")\n card_button = Button(btm_frame2, image=card_image, command = cardPMT)\n card_button.grid(row = 1, column = 2,padx = 30);\n card_button.image = card_image\n \n escape_image = PhotoImage(file=\"./img/tenders/exit.png\")\n escape_button = Button(btm_frame2, image=escape_image, command = tenderESC)\n escape_button.grid(row = 1, column = 7,padx = 30);\n escape_button.image = escape_image\n \n\ndef tenderESC():\n \n cash_button.grid_forget()\n card_button.grid_forget()\n escape_button.grid_forget()\n \n \n skuEntry.grid(row = 1, ipadx = 100, ipady=20, sticky='w') \n goButton.grid(row = 1, ipadx = 40, ipady = 20, column = 2);\n \n pcButton.grid(row = 1, ipadx = 40, ipady = 20, padx = 17);\n qtyButton.grid(row = 1, column = 2,ipadx = 40, ipady = 20, padx = 17);\n lineVoid.grid(row = 1, column = 3,ipadx = 40, ipady = 20, padx = 17);\n pcOverride.grid(row = 1, column = 4,ipadx = 40, ipady = 20, padx = 17);\n transVoid.grid(row = 1, column = 6,ipadx = 40, ipady = 20, padx = 17);\n total_button.grid(row = 1, column = 7, padx = 17);\n \ndef cashPMT():\n amount = simpledialog.askfloat(\"Amount\", \"Enter cash amount?\",\n parent=root)\n \n if (amount == \"\"):\n showerror(\"NO ENTRY\", \"You did not enter anything...?\")\n skulist = []\n \n for child in tree.get_children():\n \n skulist.append(str(tree.item(child)[\"values\"][0]) ) ## append elem at end\n print (skulist)\n \n uploadSKUs = ','.join(map(str, skulist)) \n \n if amount is not None and amount is not \"\":\n dueAMT = float(totalAmount.get())\n givenAMT = float(amount)\n \n if (givenAMT > dueAMT):\n give_change = True\n changeDUE = givenAMT - dueAMT\n \n try:\n cont = sqlite3.connect('./database/trans.db')\n with cont:\n global database_entry\n c = cont.cursor()\n \n c.execute('''INSERT INTO transactions (transactionTimeStamp, employeeID, skuList, total, pmtMethod)\n VALUES(?,?,?,?, ?)''', (timestamp.strftime(\"%m-%d-%Y %H:%M\"), employeeID,uploadSKUs, totalAmount.get(), 'CASH'))\n cont.commit()\n \n if (give_change):\n showwarning(\"CHANGE DUE:\", \"Please return change to customer:\\n$\" + str((\"%.2f\" % changeDUE)))\n total_button.config(state = 'disabled')\n \n \n cont.close()\n tree.delete(*tree.get_children())\n \n subAmount.set(0)\n taxAmount.set(0)\n totalAmount.set(0)\n trans_counter.set(\"0\")\n tenderESC()\n \n except Error as e:\n print(e)\n except ValueError:\n showerror(\"SKU ERROR\", \"SKU: \" + amount + \" not found... \\nPlease check the sku and try again.\")\n \n \n \n else:\n print (\"\")\n\n\n\n \n \ndef popupmsg():\n if not tree.selection():\n showerror(\"ERROR\", \"NO ITEM SELECTED\")\n exit\n \n else:\n global user_input, pass_input\n \n popup = Toplevel(root) \n #popup = Tk()\n #popup.wm_title(\"!\")\n \n frame2 = Frame()\n frame2.grid()\n \n #username label\n main_label = Label(popup, text=\"Manager Override:\", font=(\"Helvetica\", 24))\n main_label.grid(row=1, column=0, sticky=NSEW, padx = 20, pady = 20)\n \n #username label\n username_label = Label(popup, text=\"Username:\", font=(\"Helvetica\", 18))\n username_label.grid(row=2, column=0, sticky=W, padx = 20, pady = 20)\n \n #password label\n password_label = Label(popup, text=\"Password:\", font=(\"Helvetica\", 18))\n password_label.grid(row=3, column=0, sticky=W, padx = 15)\n \n \n \n user_input = StringVar()\n username_entry = Entry(popup, textvariable=user_input, font=\"Helvetica 20\")\n username_entry.grid(row=2, column=1, sticky=W, padx = 10, ipadx=70, ipady=10)\n\n pass_input = StringVar()\n password_entry = Entry(popup, textvariable=pass_input, show=\"*\", font=\"Helvetica 20\")\n password_entry.grid(row=3, column=1, sticky=W, padx = 10, pady = 10,ipadx=70, ipady=10)\n \n\n #submit button\n submit_button = Button(popup, text = \"Approve\", command = approve_override)\n submit_button.grid(row = 4, column = 1, ipadx = 20, ipady=20);\n \n #submit button\n submit_button = Button(popup, text = \"Cancel\", command = lambda: popup.destroy())\n submit_button.grid(row = 4, column = 0, ipadx = 20, ipady=20);\n\n \n popup.mainloop()\n\ndef approve_override():\n \n user_id = user_input.get()\n user_pw = pass_input.get()\n \n try:\n cont = sqlite3.connect('./database/users.db')\n with cont:\n global database_entry\n c = cont.cursor()\n print (user_id)\n c.execute('''SELECT emp_pass, rights FROM employee WHERE employee_id=?''', (user_id,))\n database_entry = c.fetchone()\n \n resultString = str(database_entry)\n \n resultString=resultString.replace(\"(\",\"\")\n resultString=resultString.replace(\")\",\"\")\n resultString=resultString.replace(\"\\'\",\"\")\n \n emp_pass1, rights = resultString.split(',', 1)\n rights = rights.replace(\" \", \"\")\n rights = int(rights)\n print (\"manager check:\" + str(rights))\n #resultString=resultString.replace(\",\",\"\")\n \n\n user_entry = user_pw\n user_entry = user_entry.encode(encoding='utf_8')\n \n \n sha = hashlib.sha1(user_entry)\n check1 = sha.hexdigest()\n \n print (\"hashed inputted pass: \" + check1)\n print (\"hashed db pass: \" + emp_pass1)\n \n if (check1 == emp_pass1 and rights == 2):\n original_price = tree.item(tree.selection())['values'][2]\n orig_qty = tree.item(tree.selection())['values'][3]\n \n orig_qty = int(orig_qty)\n original_price = float(original_price)\n \n new_price = simpledialog.askfloat(\"New Price\", \"Enter New Price:\",\n parent=root)\n \n if (new_price == \"\"):\n showerror(\"NO ENTRY\", \"You did not enter anything...?\")\n \n \n if new_price is not None and new_price is not \"\":\n \n tree.set(tree.selection(), 2, new_price)\n \n original_price = original_price * orig_qty\n \n global subAmount\n orig_subtotal = subAmount.get()\n conv_subtotal = float (orig_subtotal)\n \n conv_subtotal = conv_subtotal - original_price\n \n new_price = new_price * orig_qty \n conv_subtotal = conv_subtotal + new_price\n \n sub_update = str(\"%.2f\" % conv_subtotal)\n \n subAmount.set(sub_update)\n \n sales_taxes = conv_subtotal * 0.0925\n \n global taxAmount\n \n taxAmount.set(\"0\")\n \n tax_update = str(\"%.2f\" % sales_taxes)\n \n taxAmount.set(tax_update)\n \n \n \n global totalAmount\n \n orig_total = totalAmount.get()\n orig_total = float(orig_total)\n \n final_update = str(\"%.2f\" % (conv_subtotal + sales_taxes))\n \n totalAmount.set(final_update)\n \n else:\n showwarning(\"Incorrect Credentials\", \"Please try again.\")\n popup.destroy()\n \n cont.close()\n except ValueError:\n showwarning(\"Incorrect Credentials\", \"Please try again.\")\n \n\n \n \n \ndef cardPMT():\n number = simpledialog.askinteger(\"Card Info\", \"Enter 16-Digit Card Number: \\n --- Without Any Dashes or Spaces ---\",\n parent=root)\n \n if (number == \"\"):\n showerror(\"NO ENTRY\", \"You did not enter anything...?\")\n skulist = []\n \n for child in tree.get_children():\n \n skulist.append(str(tree.item(child)[\"values\"][0]) ) ## append elem at end\n print (skulist)\n \n uploadSKUs = ','.join(map(str, skulist)) \n \n if number is not None and number is not \"\":\n \n \n \n try:\n cont = sqlite3.connect('./database/trans.db')\n with cont:\n global database_entry\n c = cont.cursor()\n number= str(number)\n submission = hashlib.sha1(number.encode('utf-8')).hexdigest()\n c.execute('''INSERT INTO transactions (transactionTimeStamp, employeeID, skuList, total, pmtMethod, pmtInfo)\n VALUES(?,?,?,?,?, ?)''', (timestamp.strftime(\"%m-%d-%Y %H:%M\"), employeeID,uploadSKUs, totalAmount.get(), 'CARD', submission))\n cont.commit()\n\n total_button.config(state = 'disabled')\n \n \n cont.close()\n tree.delete(*tree.get_children())\n \n subAmount.set(0)\n taxAmount.set(0)\n totalAmount.set(0)\n trans_counter.set(\"0\")\n tenderESC()\n \n except Error as e:\n print(e)\n except ValueError:\n showerror(\"SKU ERROR\", \"SKU: \" + \" not found... \\nPlease check the sku and try again.\")\n \n \n \n else:\n print (\"\")\n\n \n \n \n \ndef lnVoid():\n selection = tree.selection() ## get selected item\n\n sel_sku = tree.item(tree.selection())['values'][0]\n sel_desc = tree.item(tree.selection())['values'][1]\n sel_price = tree.item(tree.selection())['values'][2]\n sel_qty = tree.item(tree.selection())['values'][3]\n \n if messagebox.askokcancel(\"Void Line\", \"Confirm Void Entry: \\nSKU:\" + str(sel_sku) + \"\\nItem: \" + sel_desc + \"\\nPrice: \" + str(sel_price)):\n conv_sel = float(sel_price)\n conv_qty = int(sel_qty)\n \n conv_sel = conv_sel * conv_qty\n \n global subAmount\n orig_subtotal = subAmount.get()\n conv_subtotal = float (orig_subtotal)\n conv_subtotal = conv_subtotal - conv_sel\n \n sub_update = str(\"%.2f\" % conv_subtotal)\n \n subAmount.set(sub_update)\n \n sel_taxes = conv_sel * 0.0925\n \n global taxAmount\n orig_tax = taxAmount.get()\n conv_tax = float (orig_tax)\n conv_tax = conv_tax - sel_taxes\n \n tax_update = str(\"%.2f\" % conv_tax)\n \n taxAmount.set(tax_update)\n \n \n \n global totalAmount\n sel_total = conv_sel+sel_taxes\n orig_total = totalAmount.get()\n orig_total = float(orig_total)\n \n update = orig_total - sel_total\n final_update = str(\"%.2f\" % update)\n \n totalAmount.set(final_update)\n \n global trans_counter\n origqty = int( trans_counter.get())\n origqty = origqty - conv_qty\n \n \n trans_counter.set(str(origqty))\n \n tree.delete(selection)\n \n if not tree.get_children():\n total_button.config(state = 'disabled')\n\ndef clear_search(event): \n skuEntry.delete(0, END) \n \ndef handle_click(event):\n if tree.identify_region(event.x, event.y) == \"separator\":\n return \"break\"\n\n\n\n\ndef edit():\n x = tree.get_children()\n for item in x: ## Changing all children from root item\n tree.item(item, text=\"blub\", values=(\"foo\", \"bar\"))\n\n\n\ndef fetchData():\n \n tree.focus_set()\n \n\n if inputSKU.get() is not None and inputSKU.get() is not \"\":\n \n try:\n cont = sqlite3.connect('./database/SKU.db')\n with cont:\n global database_entry\n c = cont.cursor()\n c.execute('''SELECT Description, Price FROM skus WHERE SKU=?''', (inputSKU.get(),))\n database_entry = c.fetchone()\n resultString = str(database_entry)\n resultString=resultString.replace(\"(\",\"\")\n resultString=resultString.replace(\")\",\"\")\n resultString=resultString.replace(\"\\'\",\"\")\n #resultString=resultString.replace(\",\",\"\")\n \n description, price = resultString.split(',', 1)\n \n price = float(price)\n \n global subAmount\n \n subAmount1 = float(subAmount.get())\n subAmount1 = subAmount1 + price\n subAmount2 = str(\"%.2f\" % subAmount1)\n subAmount.set((subAmount2))\n \n global taxAmount\n taxAmount0 = float(taxAmount.get())\n taxAmount1 = (price * 0.0925) \n \n taxAmount2 = taxAmount0 + taxAmount1\n taxAmount3 = str((\"%.2f\" % taxAmount2))\n \n taxAmount.set(taxAmount3)\n \n global totalAmount\n \n totalAmount0 = float(totalAmount.get())\n totalAmount1 = totalAmount0 + price + taxAmount1\n \n totalAmount.set((\"%.2f\" % totalAmount1))\n \n \n \n tree.insert('', 'end', text = inputSKU.get(), values=(inputSKU.get(),description,price,'1'))\n \n global trans_counter\n trans_counter0 = trans_counter.get()\n trans_counter1 = int(trans_counter0)\n \n trans_counter1 = trans_counter1 + 1\n trans_counter2 = str(trans_counter1)\n \n trans_counter.set(trans_counter2)\n skuEntry.delete(0, END)\n skuEntry.insert(0, \"Click Here to Enter a SKU\") \n total_button.config(state = 'normal')\n cont.close()\n except Error as e:\n print(e)\n except ValueError:\n showerror(\"SKU ERROR\", \"SKU not found... \\nPlease check the sku and try again.\")\n skuEntry.delete(0, END)\n skuEntry.insert(0, \"Click Here to Enter a SKU\") \n else:\n skuEntry.delete(0, END)\n skuEntry.insert(0, \"Click Here to Enter a SKU\") \n \n \n\ndef priceCheck():\n answer = simpledialog.askstring(\"Price Lookup\", \"What is the SKU of the item?\",\n parent=root)\n \n if (answer == \"\"):\n showerror(\"NO ENTRY\", \"You did not enter anything...?\")\n \n if answer is not None and answer is not \"\":\n \n try:\n cont = sqlite3.connect('./database/SKU.db')\n with cont:\n global database_entry\n c = cont.cursor()\n c.execute('''SELECT Description, Price FROM skus WHERE SKU=?''', (answer,))\n database_entry = c.fetchone()\n resultString = str(database_entry)\n resultString=resultString.replace(\"(\",\"\")\n resultString=resultString.replace(\")\",\"\")\n resultString=resultString.replace(\"\\'\",\"\")\n #resultString=resultString.replace(\",\",\"\")\n \n description, price = resultString.split(',', 1)\n \n \n showinfo(\"SKU: \" + answer, description + \"\\nCost: $\" + price)\n \n \n cont.close()\n except Error as e:\n print(e)\n except ValueError:\n showerror(\"SKU ERROR\", \"SKU: \" + answer + \" not found... \\nPlease check the sku and try again.\")\n \n else:\n print (\"\")\n \n \n\n\nroot = buildFrame()\nroot.mainloop()\n\n\n\n"
},
{
"alpha_fraction": 0.5395772457122803,
"alphanum_fraction": 0.5527321696281433,
"avg_line_length": 30.996402740478516,
"blob_id": "e7707bb55b35ffead428363a1354416c8da5bb31",
"content_id": "2e40b1dea8813ed21f94314f022be5a5acf6b343",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 8894,
"license_type": "no_license",
"max_line_length": 108,
"num_lines": 278,
"path": "/Login_screenn.py",
"repo_name": "izaazk/tkinterPOS",
"src_encoding": "UTF-8",
"text": "'''\n@author: Izaaz Kothawala\n@date: 03/26/2018\n@class: ITMD 413\n@Lab: 08\n\n\n\n'''\nimport datetime\nimport locale\nfrom tkinter import *\nfrom tkinter import messagebox\nfrom tkinter.messagebox import showinfo, showwarning, showerror\nimport uuid\nimport hashlib\nimport sqlite3\nfrom sqlite3 import Error\nfrom sqlite3 import Cursor\nfrom _ctypes import alignment\nfrom tkinter import simpledialog\nimport sys\n\n\n\n\n\ndef buildFrame () :\n \n global user_input, pass_input, root, username_entry\n \n root = Tk()\n root.geometry(\"1000x500\")\n \n frame1 = Frame(root)\n frame1.pack()\n \n \n root.resizable(False, False)\n root.title(\"POS Login Screen\")\n root.option_add('*font', 'Helvetica -20')\n \n img = PhotoImage(file=\"./img/logo.png\")\n bg_img = PhotoImage(file=\"./img/bg.png\")\n\n lbl1 = Label(root, image=bg_img)\n lbl1.image = bg_img\n lbl1.place(x=0, y=0, relwidth=1, relheight=1) \n \n lbl = Label(root, image=img)\n lbl.image = img\n lbl.pack(fill = X, pady=15);\n \n frame2 = Frame(root)\n frame2.pack(side = LEFT, expand=FALSE, padx=20, pady=5)\n \n #username label\n username_label = Label(frame2, text=\"Username:\", font=(\"Helvetica\", 24))\n username_label.grid(row=1, column=0, sticky=W, padx = 20, pady = 20)\n \n #password label\n password_label = Label(frame2, text=\"Password:\", font=(\"Helvetica\", 24))\n password_label.grid(row=2, column=0, sticky=W, padx = 15)\n \n #input for username\n user_input = StringVar()\n \n username_entry = Entry(frame2, textvariable=user_input, font=\"Helvetica 20\")\n username_entry.grid(row=1, column=1, sticky=W, padx = 10, ipadx=70, ipady=10)\n \n #input for password\n pass_input = StringVar()\n \n password_entry = Entry(frame2, textvariable=pass_input, show=\"*\", font=\"Helvetica 20\")\n password_entry.grid(row=2, column=1, sticky=W, padx = 10, pady = 10,ipadx=70, ipady=10)\n \n #submit button\n login_image = PhotoImage(file=\"./img/login.png\")\n submit_button = Button(frame2, image=login_image, command = fetchRecord)\n submit_button.grid(row = 3, column = 1);\n submit_button.image = login_image\n \n #clear button\n clear_image = PhotoImage(file=\"./img/clear.png\")\n clear_button = Button(frame2, image=clear_image, command = clearFields)\n clear_button.grid(row = 3, padx=10, pady=10 );\n clear_button.image = clear_image \n \n \n frame3 = Frame(root)\n frame3.pack(side = TOP, expand=TRUE)\n \n #pricecheck button\n pricecheck = PhotoImage(file=\"./img/pricecheck.png\")\n pc_button = Button(frame3, image=pricecheck, command = priceCheck)\n pc_button.grid();\n pc_button.image = pricecheck \n \n frame4 = Frame(root)\n frame4.pack(side = BOTTOM, expand=TRUE)\n #inventory button\n inventoryLookup = PhotoImage(file=\"./img/inventory.png\")\n inventoryL = Button(frame4, image=inventoryLookup, command = inventoryCheck)\n inventoryL.grid();\n inventoryL.image = inventoryLookup \n \n return root\n\ndef clearFields():\n user_input.set(\"\")\n pass_input.set(\"\")\n username_entry.focus()\n\ndef fetchRecord():\n userID = user_input.get()\n try:\n cont = sqlite3.connect('./database/users.db')\n with cont:\n global database_entry\n c = cont.cursor()\n c.execute('''SELECT emp_pass FROM employee WHERE employee_id=?''', (userID,))\n database_entry = c.fetchone()\n resultString = str(database_entry)\n resultString=resultString.replace(\"(\",\"\")\n resultString=resultString.replace(\")\",\"\")\n resultString=resultString.replace(\"\\'\",\"\")\n resultString=resultString.replace(\",\",\"\")\n \n\n user_entry = pass_input.get()\n user_entry = user_entry.encode(encoding='utf_8')\n secure (user_entry, resultString)\n \n cont.close()\n except Error as e:\n # if (userID == \"adminIIT\") and (pass_input == \"IITadmin\"):\n # showinfo(\"OFFLINE ACCOUNT\", \"LOCAL ACCOUNT LOGGED IN SUCCESSFULLY\")\n print (e)\n \ndef secure(user_entry, resultString):\n sha = hashlib.sha1(user_entry)\n check1 = sha.hexdigest()\n \n if (check1 == resultString):\n #showinfo(\"Success\", \"Login verified\")\n getUserInfo()\n else:\n showwarning(\"GANDU\", \"GANDU\")\n\ndef getUserInfo():\n userID = user_input.get()\n try:\n cont = sqlite3.connect('./database/users.db')\n with cont:\n global database_entry\n c = cont.cursor()\n c.execute('''SELECT emp_name, rights FROM employee WHERE employee_id=?''', (userID,))\n database_entry = c.fetchone()\n resultString = str(database_entry)\n resultString=resultString.replace(\"(\",\"\")\n resultString=resultString.replace(\")\",\"\")\n resultString=resultString.replace(\"\\'\",\"\")\n #resultString=resultString.replace(\",\",\"\")\n \n emp_name, rights = resultString.split(',', 1)\n \n rights = rights.replace(\" \", \"\")\n \n fo = open(\"./database/active_user\", \"w+\")\n fo.writelines([userID+\"\\n\", emp_name+\"\\n\", rights+\"\\n\"])\n fo.close()\n \n root.destroy()\n success = 1\n if (success == 1):\n rights2 = int(rights)\n print (rights2)\n if(rights2 == 2):\n import adminChoice\n adminChoice.root\n elif(rights2 == 1):\n import main\n main.root\n cont.close()\n except Error as e:\n # if (userID == \"adminIIT\") and (pass_input == \"IITadmin\"):\n # showinfo(\"OFFLINE ACCOUNT\", \"LOCAL ACCOUNT LOGGED IN SUCCESSFULLY\")\n print (e)\n \n \n\ndef on_closing():\n if messagebox.askokcancel(\"Quit\", \"Do you want to quit?\"):\n root.destroy()\n \ndef priceCheck():\n answer = simpledialog.askstring(\"Price Lookup\", \"What is the SKU of the item?\",\n parent=root)\n \n if (answer == \"\"):\n showerror(\"NO ENTRY\", \"You did not enter anything...?\")\n \n if answer is not None and answer is not \"\":\n \n try:\n cont = sqlite3.connect('./database/SKU.db')\n with cont:\n global database_entry\n c = cont.cursor()\n c.execute('''SELECT Description, Price FROM skus WHERE SKU=?''', (answer,))\n database_entry = c.fetchone()\n resultString = str(database_entry)\n resultString=resultString.replace(\"(\",\"\")\n resultString=resultString.replace(\")\",\"\")\n resultString=resultString.replace(\"\\'\",\"\")\n #resultString=resultString.replace(\",\",\"\")\n \n description, price = resultString.split(',', 1)\n \n \n showinfo(\"SKU: \" + answer, description + \"\\nCost: $\" + price)\n \n \n cont.close()\n except Error as e:\n print(e)\n except ValueError:\n showerror(\"SKU ERROR\", \"SKU: \" + answer + \" not found... \\nPlease check the sku and try again.\")\n \n else:\n print (\"\")\n \n \ndef inventoryCheck():\n answer = simpledialog.askstring(\"Inventory Lookup\", \"What is the SKU of the item?\",\n parent=root)\n \n if (answer == \"\"):\n showerror(\"NO ENTRY\", \"You did not enter anything...?\")\n \n if answer is not None and answer is not \"\":\n \n try:\n cont = sqlite3.connect('./database/SKU.db')\n with cont:\n global database_entry\n c = cont.cursor()\n c.execute('''SELECT Description, Quantity FROM skus WHERE SKU=?''', (answer,))\n database_entry = c.fetchone()\n resultString = str(database_entry)\n resultString=resultString.replace(\"(\",\"\")\n resultString=resultString.replace(\")\",\"\")\n resultString=resultString.replace(\"\\'\",\"\")\n #resultString=resultString.replace(\",\",\"\")\n \n description, stock = resultString.split(',', 1)\n \n \n showinfo(\"SKU: \" + answer, description + \"\\nOn-Hand Quantity: \" + stock)\n \n \n cont.close()\n except Error as e:\n print(e)\n except ValueError:\n showerror(\"SKU ERROR\", \"SKU: \" + answer + \" not found... \\nPlease check the sku and try again.\")\n \n \n \n\nroot = buildFrame()\nroot.mainloop()\n\n\n\nlocale.setlocale( locale.LC_ALL, '' )\ntimestamp = datetime.datetime.now()\nprint((\"\\nAuthor: Izaaz Kothawala\") + (\"\\nLab 08\") + (\"\\nTimestamp: \") + timestamp.strftime(\"%m-%d-%Y %H:%M\"))"
}
] | 5 |
viliwonka/messenger
|
https://github.com/viliwonka/messenger
|
a60ca0998c2637af732db4f4af51dfa7a503decf
|
0c6c2068f764965678d34f69a23bd4b58833c771
|
62c5a64b7878a407d342976f4cfb0dd824e3b886
|
refs/heads/master
| 2020-06-20T06:39:37.929289 | 2017-03-02T09:16:56 | 2017-03-02T09:16:56 | 74,897,858 | 0 | 0 | null | null | null | null | null |
[
{
"alpha_fraction": 0.6477524042129517,
"alphanum_fraction": 0.6512895822525024,
"avg_line_length": 31.161136627197266,
"blob_id": "5c02857093203945d25755438d6dfa1d8fa49a2e",
"content_id": "48399be742c14dc2c3a76af663bf8ca30cd5c691",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 6785,
"license_type": "no_license",
"max_line_length": 147,
"num_lines": 211,
"path": "/messenger_Server/code/chatMain.py",
"repo_name": "viliwonka/messenger",
"src_encoding": "UTF-8",
"text": "import psycopg2\nimport sys\nimport json\n\nimport datetime\nimport asyncio\nfrom asyncio import coroutine\nfrom os import environ\n\nfrom autobahn.asyncio.wamp import ApplicationSession, ApplicationRunner\n\nimport sqlQueries\n\ntokenToUsernameDict = {}\n#WAMP API DEFINITIONS\nclass MyComponent(ApplicationSession):\n\n\tasync def onJoin(self, details):\n\t\t\n\t\tglobal tokenToUsernameDict\n\n\t\tprint(\"session joined\")\n\t\t# can do subscribes, registers here e.g.:\n\t\t# yield from self.subscribe(...)\n\t\t# yield from self.register(...)\n\t\n\t\t# just example code for date\n # def utcnow():\n # now = datetime.datetime.utcnow()\n # return now.strftime(\"%Y-%m-%dT%H:%M:%SZ\")\n\t\tdef usernameExists(username):\n\t\t\tif type(username) is str:\n\t\t\t\ttry:\n\t\t\t\t\tif sqlQueries.userExists(username):\n\t\t\t\t\t\tprint(\"userExists, True\") \n\t\t\t\t\t\treturn \"true\"\n\t\t\t\t\telse:\n\t\t\t\t\t\tprint(\"userExists, False\")\n\t\t\t\t\t\treturn \"false\"\n\t\t\t\texcept Exception as e:\n\t\t\t\t\tprint(\"Error, exception:\" + str(e))\n\t\t\t\t\treturn \"error\"\t\n\t\t\telse:\n\t\t\t\tprint(\"Error \\\"usernameExists\\\": username or password are not strings\")\n\t\t\t\treturn \"error\"\n\n\t\tdef registerNewUser(username, password, countryCode):\n\t\t\tif type(username) is str and type(password) is str and len(username) >= 6 and len(username) <= 32 and len(password) >= 6 and len(password)<= 40:\n\t\t\t\tprint(\"----------------------------\")\n\t\t\t\tprint(\"Username: \" + username)\n\t\t\t\tprint(\"Password: \" + password)\n\t\t\t\tprint(\"countryCode \" + countryCode)\n\t\t\t\tprint(\"----------------------------\")\n\t\t\t\t\n\t\t\t\ttry:\n\t\t\t\t\tif not sqlQueries.userExists(username):\n\t\t\t\t\t\tresult = sqlQueries.registerUser(username, password, countryCode)\n\n\t\t\t\t\t\tprint(\"Result:\" + str(result))\n\t\t\t\t\t\tprint(\"User {} with pw \\\"{}\\\" has registered sucessfully\".format(username, password))\n\t\t\t\t\t\treturn \"ok\"\n\t\t\t\t\telse:\n\t\t\t\t\t\tprint(\"User {} already exists.\".format(username))\n\t\t\t\t\t\treturn \"alreadyExists\"\n\t\t\t\texcept Exception as e:\n\t\t\t\t\tprint(\"Error \\\"registerNewUser\\\" exception:\" + str(e))\n\t\t\t\t\treturn \"error\"\t\n\t\t\telse:\n\t\t\t\tprint(\"Error \\\"registerNewUser\\\": username or password are not strings\")\n\t\t\t\treturn \"error\"\n\n\t\tdef login(username, password):\n\t\t\tglobal tokenToUsernameDict\n\t\t\tif type(username) is str and type(password) is str:\n\t\t\t\ttry:\n\t\t\t\t\t#preverimo, ali user obstaja in kasneje preverimo, ce se geslo ujema\n\t\t\t\t\tif sqlQueries.userExists(username):\n\n\t\t\t\t\t\t(success, token) = sqlQueries.authenticateUser(username, password)\n\n\t\t\t\t\t\tif success is True:\n\t\t\t\t\t\t\t#zapomni si zeton, zato da bo user lahko vedno posiljal svoj zeton\n\t\t\t\t\t\t\ttokenToUsernameDict[token] = username\n\t\t\t\t\t\t\tprint(\"User {} with pw \\\"{}\\\" has logined sucessfully\".format(username, password))\t\t\t\t\t\n\t\t\t\t\t\t\treturn [\"ok\", token]\n\t\t\t\t\t\telse:\n\t\t\t\t\t\t\tprint(\"Wrong auth with username {}.\".format(username))\n\t\t\t\t\t\t\treturn [\"wrong\", \"\"]\t\n\t\t\t\t\telse:\n\t\t\t\t\t\tprint(\"Username {} does not exists.\".format(username))\n\t\t\t\t\t\treturn [\"wrong\", \"\"]\n\t\t\t\texcept Exception as e:\n\t\t\t\t\tprint(\"Error \\\"login\\\", exception:\" + str(e))\n\t\t\t\t\treturn [\"error\", \"\"]\t\n\t\t\telse:\n\t\t\t\tprint(\"Error \\\"login\\\": username or password are not strings\")\n\t\t\t\treturn [\"error\",\"\"]\n\n\t\t#register functions\n\t\tawait self.register(usernameExists, u'com.usernameExists')\n\t\tawait self.register(registerNewUser, u'com.registerNewUser')\n\t\tawait self.register(login, u'com.login')\n\t\t\n\t\t########################################################################\n\t\t#FRIENDSHIPPING\n\t\t########################################################################\n\t\t#search for friendship with this query, returns list of friends\n\t\t#username of person that is doing query\n\t\tdef searchRooms(username, query):\n\t\t\tpass\n\t\t\t\n\t\tdef searchUsers(username, query):\n\t\t\treturn sqlQueries.searchUsername(username, query)\n\t\t\n\t\tawait self.register(searchUsers, u'com.searchUsers')\n\t\t\n\t\t#request friendship with this query\n\t\t#username of person that is doing query\n\t\t#targetPerson is person that we are requesting friendship from\n\t\tdef requestFriendship(username, targetPerson):\n\n\t\t\t(status, origCaller) = sqlQueries.getFriendshipStatus(username, targetPerson)\n\n\t\t\t#target person already sent request\n\t\t\tif status == sqlQueries.fRequest and origCaller == targetPerson:\n\t\t\t\tsqlQueries.setFriendshipStatus(username, targetPerson, sqlQueries.fAccepted)\n\t\t\telse:\n\t\t\t\tsqlQueries.setFriendshipStatus(username, targetPerson, sqlQueries.fRequest)\n\n\t\t\treturn True\n\n\t\tawait self.register(requestFriendship, u'com.requestFriendship')\n\t\t#accept or ignore friend with this query\n\t\t#username of person that is doing accept/ignore to targetPerson\n\t\t#uses tables: user, friendship, participates, chatroom\n\t\tdef acceptFriendship(username, targetPerson, acceptElseIgnore):\n \n\t\t\t(status, origCaller) = sqlQueries.getFriendshipStatus(username, targetPerson)\n\n\t\t\tif status == sqlQueries.fRequest and origCaller == targetPerson:\n\t\t\t\tsqlQueries.setFriendshipStatus(username, targetPerson, sqlQueries.fRequest)\n\t\t\t\treturn True\n\t\t\telse:\n\t\t\t\treturn False\n\n\t\tawait self.register(acceptFriendship, u'com.acceptFriendship')\n\t\t#gets list of all friends and also their status (friended, waitingYouForAccept, waitingHimForAccept)\n\t\tdef getAllFriends(username):\n\t\t\treturn sqlQueries.getAllFriends(username)\n\n\t\tawait self.register(getAllFriends, u'com.getAllFriends')\n\t\t#########################################################################\n\t\t#MESSAGING\n\t\t#########################################################################\n\t\t#send message to room/friend, also save it to database\n\t\tdef sendMessage(chatroomName, username, text, timestamp):\n\t\t\treturn False\n\n\t\tdef getMessages(chatroomName, username, from_timestamp, to_timestamp):\n\t\t\tpass\n\n\t\t#########################################################################\n\t\t#ROOMS\n\t\t########################################################################\n\n\t\t#not an actual query, but useful function that converts friendship chat to room\n\t\tdef friendshipToRoomName(friend1, friend2):\n\t\t\tarr = [friend1, friend2]\n\t\t\tarr.sort()\n\t\t\treturn arr[0] + \"____\" + arr[1]\n\n\t\t#uses tables: user, participates, chatroom\n\t\tdef createRoom(username, roomName):\n\t\t\t#ignore now\n\t\t\tpass\n\n\t\t#uses tables: user, participates, chatroom\n\t\tdef joinRoom(username, roomName):\n\t\t\t#ignore now\n\t\t\tpass\n\n\t\t#find rooms that are public\n\t\t# uses tables: chatroom\n\t\tdef searchPublicRoom(username, query):\n\t\t\t#ignore now\n\t\t\tpass\n\n\t\tdef inviteToRoom(username, roomName, targetPerson):\n\t\t\t#ignore now\n\t\t\tpass\n\n\t\tsqlQueries.setFriendshipStatus(\"kmetkmet\", \"drekdrek\", \"Accepted\")\n\t\tprint(str(sqlQueries.getAllFriends(\"kmetkmet\")))\nif __name__ == '__main__':\n\n\tprint(\"Connecting to database\")\n\n\ttry:\n\t\tsqlQueries.connectDatabase()\n\n\texcept Exception as e:\n\t\tprint(e)\n\n\tprint(\"Running server component.\")\n\n\ttry: \n\t\trunner = ApplicationRunner(url=u\"ws://127.0.0.1:8080/ws\", realm=u\"realm1\")\n\t\trunner.run(MyComponent)\n\n\texcept Exception as e:\n\t\tprint(e)"
},
{
"alpha_fraction": 0.7111501097679138,
"alphanum_fraction": 0.7142230272293091,
"avg_line_length": 20.685714721679688,
"blob_id": "da5799a18f1f1d9f0a5a46e7f3d31f4bc4ecd3d2",
"content_id": "4500f1a95eabc81b2083c29ca512aeb0f19c7cb9",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "SQL",
"length_bytes": 2278,
"license_type": "no_license",
"max_line_length": 62,
"num_lines": 105,
"path": "/messenger_Server/code/sqlFiles/podatkovni_model_table_create.sql",
"repo_name": "viliwonka/messenger",
"src_encoding": "UTF-8",
"text": "CREATE TABLE public.user (\n username text NOT NULL,\n password_hash varchar(128) NOT NULL,\n lastonline timestamp without time zone NOT NULL,\n registertime timestamp with time zone NOT NULL,\n countrycode text NOT NULL,\n status text,\n PRIMARY KEY (username)\n);\n\nCREATE INDEX ON public.user\n (countrycode);\n\n\nCOMMENT ON COLUMN public.user.status\n IS 'NULL,\nKicked,\nBanned';\n\nCREATE TABLE public.friendship (\n friendshipid serial NOT NULL,\n username_1 text NOT NULL,\n username_2 text NOT NULL,\n status text NOT NULL,\n PRIMARY KEY (friendshipid)\n);\n\nCREATE INDEX ON public.friendship\n (username_1);\nCREATE INDEX ON public.friendship\n (username_2);\n\n\nCREATE TABLE public.message (\n messageid bigserial NOT NULL,\n chatroomid integer NOT NULL,\n username text NOT NULL,\n text text NOT NULL,\n timeposted timestamp without time zone NOT NULL,\n PRIMARY KEY (messageid)\n);\n\nCREATE INDEX ON public.message\n (chatroomid);\nCREATE INDEX ON public.message\n (username);\n\n\nCREATE TABLE public.country (\n code text NOT NULL,\n name text NOT NULL,\n PRIMARY KEY (code)\n);\n\nALTER TABLE public.country\n ADD UNIQUE (name);\n\n\nCREATE TABLE public.activitydata (\n activitydataid bigserial NOT NULL,\n username text NOT NULL,\n time timestamp without time zone NOT NULL,\n ip integer NOT NULL,\n agentdata text NOT NULL,\n description text NOT NULL,\n PRIMARY KEY (activitydataid)\n);\n\nCREATE INDEX ON public.activitydata\n (username);\n\n\nCREATE TABLE public.chatroom (\n chatroomid serial NOT NULL,\n name text,\n roomtype text NOT NULL,\n lastmessage integer NOT NULL,\n firstmessage integer NOT NULL,\n PRIMARY KEY (chatroomid)\n);\n\nALTER TABLE public.chatroom\n ADD UNIQUE (name);\n\nCREATE INDEX ON public.chatroom\n (lastmessage);\nCREATE INDEX ON public.chatroom\n (firstmessage);\n\n\nCOMMENT ON COLUMN public.chatroom.roomtype\n IS 'Public, Private/Invite only, Invite disabled, Friend';\n\nCREATE TABLE public.participates (\n participatesid serial NOT NULL,\n username text NOT NULL,\n chatroomid integer NOT NULL,\n userprivilege text,\n PRIMARY KEY (participatesid)\n);\n\nCREATE INDEX ON public.participates\n (username);\nCREATE INDEX ON public.participates\n (chatroomid);\n\n"
},
{
"alpha_fraction": 0.8392156958580017,
"alphanum_fraction": 0.8392156958580017,
"avg_line_length": 35.57143020629883,
"blob_id": "f2b497546e7f972f53e7819e11f5c191c901994b",
"content_id": "a745c4ab0bb8b5e2917e801a728fb3dd1138ff83",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "SQL",
"length_bytes": 255,
"license_type": "no_license",
"max_line_length": 39,
"num_lines": 7,
"path": "/messenger_Server/code/sqlFiles/podatkovni_model_table_drop.sql",
"repo_name": "viliwonka/messenger",
"src_encoding": "UTF-8",
"text": "DROP TABLE public.user CASCADE;\nDROP TABLE public.friendship CASCADE;\nDROP TABLE public.message CASCADE;\nDROP TABLE public.country CASCADE;\nDROP TABLE public.activitydata CASCADE;\nDROP TABLE public.chatroom CASCADE;\nDROP TABLE public.participates CASCADE;"
},
{
"alpha_fraction": 0.6513487696647644,
"alphanum_fraction": 0.6593459248542786,
"avg_line_length": 23.9375,
"blob_id": "c9ea67e6daedd4318a3ebb6d24f1ceb256d8bd04",
"content_id": "855ec66db513da35c5c65aa30aad3391840c0531",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 8380,
"license_type": "no_license",
"max_line_length": 196,
"num_lines": 336,
"path": "/messenger_Server/code/sqlQueries.py",
"repo_name": "viliwonka/messenger",
"src_encoding": "UTF-8",
"text": "import psycopg2\nimport sys\nimport json\nimport hashlib\nimport bcrypt\n\nimport datetime\nimport asyncio\nfrom asyncio import coroutine\nfrom os import environ\n\nfrom autobahn.asyncio.wamp import ApplicationSession, ApplicationRunner\nimport random\nimport string\n\ndbConn = None\n\ndef connectDatabase():\n\n\tglobal dbConn\n\n\tconn_string = \"host='localhost' dbname='messenger' user='postgres' password='rolebole777'\"\n\tprint(\"Connecting to database\\n ->%s\" % (conn_string))\n\tdbConn = psycopg2.connect(conn_string)\n\n\tprint(\"Connected!\")\n\ndef disconnectDatabase():\n\tdbConn.close()\n# TO JE TUKAJ SAMO ZA INFO O TABELI\n\"\"\"\nCREATE TABLE public.user (\n username varchar(64) NOT NULL,\n password_hash varchar(64) NOT NULL,\n email varchar(255) NOT NULL,\n lastonline timestamp without time zone NOT NULL,\n countryid integer NOT NULL,\n registertime timestamp with time zone NOT NULL,\n status varchar(32),\n PRIMARY KEY (username)\n);\n\"\"\"\n\n#return TRUE OR FALSE\ndef userExists(username):\n\n\tSQL = \"\"\"SELECT count(username) FROM public.User GROUP BY username HAVING (username) = %(username)s \"\"\"\n\tdata = { 'username': username }\n\n\twith dbConn.cursor() as curs:\n\t\tcurs.execute(SQL, data)\n\t\tdata = curs.fetchall()\n\t\tdbConn.commit()\n\t\tif len(data) == 0:\n\t\t\treturn False\n\t\telse:\n\t\t\treturn True\n\n#return TRUE OR FALSE\ndef authenticateUser(username, password):\n\n\tSQL = \"\"\"SELECT password_hash FROM public.User WHERE username = %(username)s\"\"\"\n\t\n\tdata = { 'username': username }\n\n\twith dbConn.cursor() as curs:\n\t\tcurs.execute(SQL, data)\n\t\tdata = curs.fetchall()\n\t\tdbConn.commit()\n\n\t\tif len(data) == 0:\n\t\t\tprint(\"len(data) == 0 in authenticateUser\")\n\t\t\treturn False\n\n\t\tpassword_hash = data[0][0]\n\n\t\tprint(\"HERE:\" + str(data) + \"..\" + str(type(data)))\n\n\t\tencoded = password_hash.encode('UTF_8')\n\n\t\tsuccess = (bcrypt.hashpw(password.encode('UTF_8'), encoded) == encoded)\n\n\t\tif success:\n\t\t\ttoken = username.join(random.SystemRandom().choice(string.ascii_uppercase + string.digits) for _ in range(8))\n\t\t\treturn (True, token)\n\t\telse:\n\n\t\t\treturn (False, '')\n\n#return True or False\ndef registerUser(username, password, countrycode):\n\n\tSQL = \"\"\"INSERT INTO public.User (username, password_hash, lastonline, countrycode, registertime, status) VALUES (%(username)s, %(hash)s, %(reg_time)s, %(countryid)s, %(reg_time)s, %(status)s)\"\"\"\n\n\tpw_hash = bcrypt.hashpw(password.encode('UTF_8'), bcrypt.gensalt())\n\n\tnow = datetime.datetime.utcnow()\n\tregisterTime = now.strftime(\"%Y-%m-%dT%H:%M:%SZ\")\n\n\tdata = {\n\n\t\t'username'\t\t: username, \n\t\t'hash' \t\t\t: str(pw_hash, 'UTF_8'),\n\t\t'reg_time' \t\t: registerTime,\n\t\t'countryid'\t\t: countrycode,\n\t\t'status'\t\t: \"NULL\"\n\t}\n\n\ttry:\n\t\twith dbConn.cursor() as curs:\n\t\t\tcurs.execute(SQL, data)\n\t\t\tprint(str(curs.fetchall()))\n\t\t\tdbConn.commit()\n\t\t\treturn True\n\texcept Exception as e:\n\n\t\tprint(str(e))\n\n\t\treturn False\n\t\n########################################################################\n#FRIENDSHIP\n########################################################################\n#search for friendship with this query, returns list of friends\n#username of person that is doing query\ndef searchUsername(searcher, query):\n\t\t\n\tSQL =\"\"\"SELECT public.User.username, public.Country.name\n\t\t\tFROM public.User INNER JOIN public.Country\n\t\t\tON (public.User.CountryCode = public.Country.Code)\n\t\t\tWHERE username LIKE %(query)s\n\t\t\tAND username != %(searcher)s\n\t\t\tORDER BY username\n\t\t \"\"\"\n\n\tdata = {\n\t\t'query' \t: ('%' + query + '%'),\n\t\t'searcher' \t: searcher\n\t}\n\n\ttry:\n\t\twith dbConn.cursor() as curs:\n\t\t\tcurs.execute(SQL, data)\n\n\t\t\tsearchResult = curs.fetchall()\t\n\n\t\t\t#print(\"searchUsername: \\n\" + str(searchResult))\n\t\t\tdbConn.commit()\n\n\t\t\treturn [(x[0],x[1]) for x in searchResult]\n\texcept Exception as e:\n\t\tprint(\"searchUsername error: \" + str(e))\n\t\treturn []\n\n# request friendship with this query\n# username of person that is doing query\n# targetPerson is person that we are requesting friendship from\n\nfNone = \"Empty\"\nfRequest = \"Request\"\nfIgnore = \"Ignore\"\nfAccepted = \"Accepted\"\n\nfriendStatus = [fRequest, fIgnore, fAccepted, fNone]\n\ndef setFriendshipStatus(thisUsername, targetUsername, status):\n\n\tglobal friendStatus\n\n\t#error checking\n\tif status not in friendStatus:\n\t\traise Exception(\"Wrong status\")\n\n\tarr = [thisUsername, targetUsername]\n\t#sortiraj, da bo vedno v istem vrstnem redu\n\tarr.sort()\n\t\n\tcallIndex = 0\n\n\tif(arr[1] == thisUsername):\n\t\tcallIndex = 1\n \n\tdata = {\n\t\t'UserOne' : arr[0],\n\t\t'UserTwo' : arr[1],\n\t\t'Status' : status + \"_\" + str(callIndex)\n\t}\n\tprint(\"status: \" + status)\n\t#najprej preverimo, če obstaja zapis\n\tSQL_exists = \"\"\"SELECT * FROM public.friendship AS fs WHERE fs.Username_1 = %(UserOne)s AND fs.Username_2 = %(UserTwo)s\"\"\" \n\n\trecordExists = False\n\n\twith dbConn.cursor() as curs:\n\n\t\tcurs.execute(SQL_exists, data)\n\t\tif len(curs.fetchall()) == 0:\n\t\t\trecordExists = False\n\t\telse:\n\t\t\trecordExists = True\n\t\tdbConn.commit()\n\t\t\n\tSQL = \"\"\n\n\tif recordExists:\n\n\t\tSQL = \"\"\"UPDATE public.friendship AS fs SET Status = %(Status)s\n\t\t\t\t WHERE fs.Username_1 = %(UserOne)s AND fs.Username_2 = %(UserTwo)s \"\"\"\n\t\tprint(\"update\")\n\telse:\n\t\tSQL = \"\"\"INSERT INTO public.friendship (Username_1, Username_2, Status)\n\t\t\t\t VALUES (%(UserOne)s, %(UserTwo)s, %(Status)s)\"\"\"\n\t\tprint(\"insert\")\n\t# preverimo ce je status sploh pravi\n\n\twith dbConn.cursor() as curs:\n\n\t\tcurs.execute(SQL, data)\n\t\tdbConn.commit()\n\ndef getFriendshipStatus(thisUsername, targetUsername):\n\n\tglobal fNone\n\n\tarr = [thisUsername, targetUsername]\n\t#sortiraj, da bo vedno v istem vrstnem redu\n\tarr.sort()\n\t\n\tdata = {\n\t\t'UserOne' : arr[0],\n\t\t'UserTwo' : arr[1]\n\t}\n\t\n\t#najprej preverimo, če obstaja zapis\n\tSQL = \"\"\"SELECT Username_1, Username_2, Status FROM public.friendship AS fs WHERE fs.Username_1 = %(UserOne)s AND fs.Username_2 = %(UserTwo)s\"\"\" \n\n\twith dbConn.cursor() as curs:\n\n\t\tcurs.execute(SQL, data)\n\t\tdbConn.commit()\n\n\t\tfetched = curs.fetchall()\n\n\t\tprint(\"len:\" + str(len(fetched)))\n\t\tprint(\"str:\" + str(fetched))\n\t\t\n\t\tif len(fetched) == 0:\n\t\t\treturn (fNone,0)\n\t\telif len(fetched) == 1:\n\t\t\n\t\t\tuser0 = fetched[0][0]\n\t\t\tuser1 = fetched[0][1]\n\t\t\tstat = fetched[0][2]\n\n\t\t\tsplitted = stat.split(\"_\")\n\n\t\t\tstatus \t = splitted[0]\n\t\t\tcallIndex = splitted[1] #\n\n\t\t\tif callIndex == 0:\n\t\t\t\treturn (status, user0)\n\t\t\telse:\n\t\t\t\treturn (status, user1)\n\t\telse:\n\t\t\traise Exception(\"WTF two records, something is not right man\")\n\n# accept or ignore friend with this query\n# username of person that is doing accept/ignore to targetPerson\n# uses tables: user, friendship, participates, chatroom\ndef acceptFriendship(thisUsername, targetPerson):\n\tglobal fAccepted\n\tsetFriendshipStatus(thisUsername, targetPerson, fAccepted)\n\ndef ignoreFriendship(thisUsername, targetPerson):\n\tglobal fIgnore\n\tsetFriendshipStatus(thisUsername, targetPerson, fIgnore)\n\ndef requestFriendship(thisUsername, targetPerson):\n\tglobal fRequest\n\tsetFriendshipStatus(thisUsername, targetPerson, fRequest)\n\n# gets list of all friends and also their status (friended, waitingYouForAccept, waitingHimForAccept)\ndef getAllFriends(username):\n\t\n\tSQL = \"\"\"SELECT Username_1, Username_2, Status FROM public.friendship \n\t\t\tWHERE (Username_1 = %(username)s OR Username_2 = %(username)s)\"\"\"\n\n\tdata = { 'username' : username }\n\n\ttry:\n\t\twith dbConn.cursor() as curs:\n\n\t\t\tcurs.execute(SQL, data)\n\t\t\tfetched = curs.fetchall()\n\t\t\tdbConn.commit()\n\n\t\t\treturn fetched\n\texcept Exception as e:\n\t\tprint(\"getAllFriends exception: \\n\" + str(e))\n\t\treturn []\n\n# ########################################################################\n# MESSAGING\n# ########################################################################\n# send message to room/friend, also save it to database\ndef saveMessage(chatroomName, username, text, timestamp):\n\tpass\n\ndef getMessages(chatroomName, username, from_timestamp, to_timestamp):\n\tpass\n\n# ########################################################################\n# ROOMS\n# #######################################################################\n\n#not an actual query, but useful function that converts \ndef friendshipToRoomName(friend1, friend2):\n\tarr = [friend1, friend2]\n\tarr.sort()\n\treturn arr[0] + \"____\" + arr[1]\n\n#uses tables: user, participates, chatroom\ndef createRoom(username, roomName):\n\tpass\n\n#uses tables: user, participates, chatroom\ndef joinRoom(username, roomName):\n\tpass\n\n#find rooms that are public\n# uses tables: chatroom\ndef searchPublicRoom(username, query):\n\tpass\n\ndef inviteToRoom(username, roomName, targetPerson):\n\tpass"
}
] | 4 |
pvop/Nixae
|
https://github.com/pvop/Nixae
|
d743972ee56981c721b597719373a64f08b09c95
|
3f3e5f0a77575987dd084027bb0082af3f29ffc5
|
214042da0f25bdf051002168bd081527419d6337
|
refs/heads/master
| 2020-09-06T13:07:37.377267 | 2019-11-10T14:13:54 | 2019-11-10T14:13:54 | 220,432,297 | 0 | 0 | null | null | null | null | null |
[
{
"alpha_fraction": 0.6071711778640747,
"alphanum_fraction": 0.6295562386512756,
"avg_line_length": 37.24242401123047,
"blob_id": "90c8dc7bf715718da00d7a2113544e99803a6cf0",
"content_id": "d5d7eaaa49961262927052637332b015b65c5053",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 5048,
"license_type": "no_license",
"max_line_length": 131,
"num_lines": 132,
"path": "/Nixae.py",
"repo_name": "pvop/Nixae",
"src_encoding": "UTF-8",
"text": "from __future__ import division\n\n\nimport tensorflow as tf\nfrom keras.models import Model\nfrom keras.layers.convolutional import (\n Conv2D\n)\nfrom keras.layers.merge import add\nfrom keras.layers.normalization import BatchNormalization\nfrom keras.regularizers import l2\nfrom keras import backend as K\nfrom keras.layers import RepeatVector, Flatten, Input, Dense, Convolution2D, MaxPooling2D, AveragePooling2D, \\\n ZeroPadding2D, Dropout, Flatten, merge, Reshape, Activation, concatenate, Lambda, Permute, Multiply,Conv1D,MaxPool1D,AvgPool1D\n\ndef _1D_bn_relu(input):\n \"\"\"Helper to build a BN -> relu block\n do batch normalization in channel axis\n \"\"\"\n norm = BatchNormalization(axis=2)(input) \n return Activation(\"relu\")(norm)\ndef _2D_bn_relu(input):\n \"\"\"Helper to build a BN -> relu block\n do batch normalization in height axis\n \"\"\"\n norm = BatchNormalization(axis=2)(input)\n return Activation(\"relu\")(norm)\n\n\ndef _conv_bn_relu(**conv_params):\n \"\"\"Helper to build a conv -> BN -> relu block\n \"\"\"\n filters = conv_params[\"filters\"]\n kernel_size = conv_params[\"kernel_size\"]\n strides = conv_params.setdefault(\"strides\", (1, 1))\n padding = conv_params.setdefault(\"padding\", \"same\")\n kernel_regularizer = conv_params.setdefault(\"kernel_regularizer\", l2(0.0003))\n\n def f(input):\n conv = Conv2D(filters=filters, kernel_size=kernel_size,\n strides=strides, padding=padding,\n kernel_regularizer=kernel_regularizer)(input)\n return _2D_bn_relu(conv)\n\n return f\n\n\n\n\ndef _shortcut(input, residual):\n \"\"\"Adds a shortcut between input and residual block and merges them with \"sum\"\n \"\"\"\n return add([input, residual])\n\n\n\nclass Nixae_Builder(object):\n @staticmethod\n def build(input_shape, num_outputs, C=50, brn=5):\n \"\"\"Builds a custom Nixae like architecture.\n\n Args:\n input_shape: The input shape in the form (packet_length)\n num_outputs: The number of outputs at final softmax layer\n C: The number of filters in the first 1D convolutional layer of the Inception block.\n brn: The number of branches to use\n Returns:\n The keras `Model`.\n \"\"\"\n\n if K.image_dim_ordering() == 'tf':\n input_shape = (input_shape[0])\n input = Input(shape=input_shape, dtype=tf.int32)\n\n cast_int = Lambda(lambda x: K.cast(x, dtype=tf.int32))\n my_input_one_hot = Lambda(lambda x: K.one_hot(cast_int(x), num_classes=256))\n output = my_input_one_hot(input)\n output = Reshape([1, input_shape[0], 256])(output)\n\n ###the 2D-conv in the first layer of 1*256\n conv1 = _conv_bn_relu(filters=32, kernel_size=(1, 256), strides=(1, 1), padding=\"valid\")(output)\n output_layer_1 = Reshape([input_shape[0],32])(conv1)\n\n\n ###begin of Inception\n\n first_brn_filter = C\n second_brn_filter = 32\n residuals = []\n for i in range(brn):\n output_1 = Conv1D(filters=first_brn_filter, kernel_size=i+1,strides=1,padding=\"same\",activation=\"relu\")(output_layer_1)\n output_1 = _1D_bn_relu(Conv1D(filters=second_brn_filter, kernel_size=1, strides=1, padding=\"same\")(output_1))\n if i==0:\n _, a, b = output_1.shape.as_list() # a=packet_length,b=C0\n output_1 = Reshape([a, b, 1])(output_1)\n residuals.append(output_1)\n ###end of Inception\n ###begin of attention\n my_sum_fn = Lambda(lambda x: K.sum(x, axis=2))\n my_reshape_length = Lambda(lambda x: K.reshape(x, [-1, brn]))\n residual = concatenate(residuals, axis=3)\n alpha = my_sum_fn(residual) # packet_length*brn\n alpha = my_reshape_length(alpha)\n Dense_layer_1 = Dense(brn * brn, activation=\"relu\", use_bias=False)\n Dense_layer_2 = Dense(units=brn, activation=\"softmax\")\n\n alpha = Dense_layer_1(alpha)\n alpha = Dense_layer_2(alpha) # (batch_size*packet_length)*brn\n my_reshape_length_2 = Lambda(lambda x: K.reshape(x, [-1, a, 1, brn]))\n alpha = my_reshape_length_2(alpha) # batch_size*packet_length*1*brn\n output = Multiply()([alpha, residual])\n finnal_sum = Lambda(lambda x: K.sum(x, axis=-1))\n output = finnal_sum(output)\n output_layer_2 = Reshape([a, b])(output)\n output_layer_2 = _shortcut(output_layer_1, output_layer_2)\n output_layer_2 = _1D_bn_relu(output_layer_2)\n ###end of attention\n\n\n output = AvgPool1D()(output_layer_2)\n\n flatten1 = Flatten()(output)\n flatten1 = Dropout(rate=0.5)(flatten1)\n dense = Dense(units=256, activation=\"relu\")(flatten1)\n dense = Dense(units=128, activation=\"relu\")(dense)\n dense = Dense(units=num_outputs, activation=\"softmax\")(dense)\n model = Model(inputs=input, outputs=dense)\n return model\n\n @staticmethod\n def build_model(input_shape, num_outputs, C, brn):\n return Nixae_Builder.build(input_shape, num_outputs, C, brn)\n"
}
] | 1 |
Etienne-Meunier/MLRG-19
|
https://github.com/Etienne-Meunier/MLRG-19
|
8fa88d8e2acb33afc14179de639cfce7194c9af1
|
deed45446ee155052f7f941996c2e86cc3dbd449
|
e812af61fbddddfbe502b5861b741101ff23593f
|
refs/heads/master
| 2020-08-10T09:17:29.712423 | 2020-04-11T09:33:07 | 2020-04-11T09:33:07 | 214,314,408 | 0 | 0 | null | null | null | null | null |
[
{
"alpha_fraction": 0.6035328507423401,
"alphanum_fraction": 0.6211972236633301,
"avg_line_length": 25.8157901763916,
"blob_id": "e3f299165314da4f577cd01ef62e5324593277dd",
"content_id": "a3abc89e4aaa46f75a982b3016e82ac094e1e501",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 1019,
"license_type": "no_license",
"max_line_length": 174,
"num_lines": 38,
"path": "/notebooks/Find_Eigenvalues.py",
"repo_name": "Etienne-Meunier/MLRG-19",
"src_encoding": "UTF-8",
"text": "# In this notebook we use symbolic calculation to find all critical points of a function and the eigenvalues associated. This way we can know the shape of the maxima / minima\nimport sympy as sy\nimport plotly.graph_objs as go\n\nx,y = sy.symbols('x y')\nsy.init_printing(use_unicode=True)\n\n#%% Define Function\nf = x**4+y**2-x*y # function 2 from Stanford\n#f = 4*x + 2*y - x**2 -3*y**2\n\nf\n\ndf_dy = sy.diff(f,y)\ndf_dx = sy.diff(f,x)\n\ndf_dx\n#%% Find critical points\ncr =sy.nonlinsolve([df_dx,df_dy],[x,y])\nprint('critical points',cr)\ncr\n#%% build hessian\ne = sy.hessian(f,[x,y])\ne\n\n#%% Find eigenvalues for each of the critical points\nfor c in cr :\n xv = c[0]\n yv = c[1]\n print('Critical point : \\n\\tx : {} \\n\\ty : {}'.format(xv.evalf(),yv.evalf()))\n eigs = list(e.subs({x:xv,y:yv}).eigenvals().keys())\n if eigs[0] > 0 and eigs[1] > 0 :\n print('Concave up')\n elif eigs[0] < 0 and eigs[1] < 0 :\n print('Concave down')\n else :\n print('Saddle Point')\n print('Eigen Values : ',eigs)\n"
},
{
"alpha_fraction": 0.7489386200904846,
"alphanum_fraction": 0.7764376401901245,
"avg_line_length": 83.94261932373047,
"blob_id": "cebd5611595e167364c1aac254ed4335d7e1ed43",
"content_id": "339380456d6c428022b02f9bd7a8425c3e6dd878",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Markdown",
"length_bytes": 10368,
"license_type": "no_license",
"max_line_length": 1302,
"num_lines": 122,
"path": "/Readme.md",
"repo_name": "Etienne-Meunier/MLRG-19",
"src_encoding": "UTF-8",
"text": "---\ntypora-copy-images-to: ./images\n---\n\n# MLRG Notes\n\nNote taking during UBC [Machine Learning reading group]([http://www.cs.ubc.ca/labs/lci/mlrg/](http://www.cs.ubc.ca/labs/lci/mlrg/)) and extensions to knowledge. \n\n## Week 1 :Generalization of Neural Networks\n\n\n\n## Week 2 : Sharp Minima Generalize Poorly\n\n**Key Points :** \n\n- Large Batch (LB ) Training lead to sharp minima and Small Batch (SB) training leads to large minima\n- Define what is a Large Minima / Sharp Minima and how to measure sharpness ( 2 ways )\n- Sharp Minima are generalizing less good than large minima\n- Techniques like data augmentation can improve generalization without influencing sharpness. \n\n> The lack of generalization ability is due to the fact that large-batch methods tend to convergetosharp minimizersof the training function. These minimizers are characterized by a signif-icant number of large positive eigenvalues in∇2f(x), and tend to generalize less well. Incontrast, small-batch methods converge toflat minimizerscharacterized by having numeroussmall eigenvalues of∇2f(x). We have observed that the loss function landscape of deep neuralnetworks is such that large-batch methods are attracted to regions with sharp minimizers andthat, unlike small-batch methods, are unable to escape basins of attraction of these minimizers.\n\n**Notes :** \n\nThis talk rely essentially on paper [1] that study the influence of the size of the Batch both on the Generalization and the \"Sharpness\" of the mini ma in the case of Stochastic Gradient Descent ( SGD ).\n\n<u>Stochastic Gradient Descent :</u> \n\nGradient descent is an iterative method for optimizing objective differentiable function. In the classic version we use the actual gradient of the dataset ( The error for the all dataset is computed ). The stochastic version use an estimate calculated on a random part of the dataset ( the concept of a batch).\n\n$$\nx_{k+1}=x_{k}-\\alpha_{k}\\left(\\frac{1}{\\left|B_{k}\\right|} \\sum_{i \\in B_{k}} \\nabla f_{i}\\left(x_{k}\\right)\\right)\n$$\n\nWhere $B_k$ is the batch.\n\nThe stochastic gradient descent guarantees : \n\n- (a) convergence to minimizers of strongly-convex functions and to stationary points ( local optima ) for non-convex functions (Bottou et al., 2016)\n- (b) saddle-pointavoidance (Ge et al., 2015; Lee et al., 2016)\n- (c) robustness to input data (Hardt et al., 2015).\n\n<u>Size of the Batch :</u> \n\nRuntimeError: Expected object of backend CUDA but got backend CPU for argument #2 'mat2'The size of the Batch is a parameter we can choose we using SGD, it can speed up the computation but it pratice some experiments showed that augmenting to much the size o the batch could lead to a loss in accuracy. \n\n<u>Sharpness of the Minima :</u>\n\nThe sharpness of the minima is defined by Hochreiter & Schmidhuber in 1997. A Flat minimizer is one where we can modify the parameters ( here in x ) in a relatively large neighborhood without the loss function to increase to much and a sharp one is the opposite basically. \n\n \n\nAn interesting thing is that we can measure the Sharpness of a minimizer by the magnitude for the eigenvalues of $\\nabla^{2} f(x)$ . \n\n**Classic Technique to measure the Sharpness :**\n\nReminder : The first derivative of a function indicate how that function change if we change the input ( if I node a bit x what will happen with f(x) ) and the second derivative is the derivative of this derivative that indicate how this variation change with x ( like the acceleration of a vehicle is the second derivative of a movement ). So if the second derivative is positive then the first derivative is increasing when x is increasing, the function is *concave up* if the second derivative is negative the function is concave down. Also the higher is the absolute value of a the second derivative is the faster the function is increasing creating a sharper spike. Here we have a function $R^n\\rightarrow R$ so we cannot directly take the directly the derivative. We use the Hessian matrix, then we can take the eigenvalues of the Hessian to interpolate the explanation we done for second degree derivative. If the Hessian is positive definite (eigenvalues all positive ) at a point x ( here the parameters ) then we are at a local minimum, if it is negative definite we are at a local maximum and if it is a mix we are at a saddle point. We can use the magnitudes of the eigenvalues to have the sharpness of the minima for each of it's input dimension. [Notebook](notebooks/Find_Eigenvalues.py)\n\n[4] Use that metric ---> ==DESCRIBE==\n\n-> Publication that use that metric\n\n**Their Technique :** \n\nApparently computing the eigenvalues of the Hessian is to expensive when we have a lot of parameters so they use another technique to compute sharpness. Their technique modify slightly the parameters in the neighborhood of the minima and check the maximum loss they can reach if they reach a big loss it is likely that the loss function was globally increasing around. Because this metrics is using max it is sensible to a outliers in the loss function ( if we have a spike ) they do maximization on random manifold.\n\nThe sharpness of the function f at point x is defined as : \n\n\n\n<u>Visualizing the loss-parameters space :</u>\n\nNow we have a definition and a metrics for sharp minima it is natural to be interested in visualizing the loss evolution in the parameter space. [2] introduce a technique to visualize in one dimension the evolution of the loss depending on the variation of the parameters. In order to do that they evaluate $J(\\theta)$ for a series of points $\\theta = (1-\\alpha)\\theta_0+\\alpha\\theta_1$, this allow to draw a line in the parameter space between $\\theta_0$ and $\\theta_1$. In this paper they used it using $\\theta_0$ as the original parameters and $\\theta_1$ as the final optimized one with a lower loss. \n\n\n\nOn this figure we see the loss associated with this \"straight line\" in the parameter space. At $\\alpha = 0$ it is the original parameters and at $\\alpha =1$ the optimized one. We can see that the parameters behave well because the closer we get in the parameter space the smaller the loss and when we get far again the loss is increasing. This can of technique can allow us to evaluate the \"cost of the path\" between 2 set of parameters and understand why the optimizer went that way or another way to minimize the loss. Also it can help us to compare 2 set of parameters and understand if they belong to the same minima. It can also help us to understand why a network took time to converge. \n\n\n\n\n\nThis graph for example show us that for this given network there is a flat area in loss improvement between the initial an optimal parameters in the parameter space, on the right it show the evolution of the loss depending of the epoch number during training and we can see that the optimizer sucessfuly avoided this plateau area and finished at the global optimum, showing that he didn't took a straight line. It also show similar results for train and test error and so no sign of overfit. \n\n<img src=\"images/1570817994395.png\" alt=\"1570817994395\" style=\"zoom:50%;\" />\n\nThis is an illustration on how we can compare the landscape between 2 optimal parameters of the model, here $\\theta_0$ is the first optimal solution and $\\theta_1$ is the second one, we can see that even if they are equivalent in loss the line between those 2 local optima encounter an higher barrier cost. It is super important to understand this graph because they are used a lot in the studied paper to observe the \"Shape\" of the minima founded and the difference between local minima.\n\nSo now, If we focus more on our original paper, they use this representation to study the difference between 2 local minima, one trained with large batch size in $\\theta_l$ and one with small batch size in $\\theta_s$ . \n$$\n\\text{We plot : }f(\\alpha\\theta_l+(1-\\alpha)\\theta_s)\n$$\nSo when $\\alpha=0$ we have optimal parameters with small batch and $\\alpha=1$ we have optimal parameters with large batch.\n\n\n\nWe can first see on this graph that the 2 minima are not linked by a straight line ( like a valley ) because the loss is increasing when we explore the linear space between them. Also we can observer the shape of the minima. They done that for several types of networks. \n\nAnother thing we have to take in account is that the Sharpness can be evaluated for each dimension of the parameter space individually so a minima can be sharp for one parameters and flat for another, one way to visualize that is in the previous notebook. They demonstrate that by taking different random sampling in their evaluation. \n\n<u>Main differences between results using LB and using SB :</u> \n\nUsing their sharpness metric and the accuracy of the network, it is now possible to manually test ==the impact of the sharpness of generalization and the impact of the batch size on both.== \n\n\n\nWe can see here on this 2 networks that globally increasing the batch size increase the sharpness of the minima founded and the hurt generalization ( decreasing of the testing accuracy ). \n\nThey also present techniques where we increase gradually the batch size with the training and that seems to work. \n\nApparently **Data Augmentation**, **Conservative Training** and **Robust Training** are techniques that help to improve performances with large batch and reduce the sharpness of the peaks. \n\nApart from this paper, it is important to note that other experimentation show that having large batch size can also be an advantage in the case we have a noisy labeling or outliers within the dataset. I also personally observed that on practical examples with noisy datasets. \n\n[[1](https://arxiv.org/abs/1609.04836)] On Large-Batch Training for Deep Learning: Generalization Gap and Sharp Minima\n\n[[2](https://arxiv.org/abs/1412.6544)] Qualitatively characterizing neural network optimization problems\n\n[[3](https://arxiv.org/abs/1705.10694)] Deep Learning is Robust to Massive Label Noise\n\n[[4](https://arxiv.org/abs/1802.08241)] Hessian-based Analysis of Large Batch Training and Robustness to Adversaries\n\n"
}
] | 2 |
jennuinecode/thejennuinelife_django
|
https://github.com/jennuinecode/thejennuinelife_django
|
a3111e974590e263e73f655dd667431664633557
|
d350bfcf48b0fc11fa9b0b40b5f109c6bb878be8
|
b8fe5c48052699d6f7899fb26d5cfbcaec4bdcb1
|
refs/heads/master
| 2021-01-19T07:52:46.631669 | 2017-05-12T15:15:19 | 2017-05-12T15:15:19 | 87,114,405 | 0 | 0 | null | null | null | null | null |
[
{
"alpha_fraction": 0.7241379022598267,
"alphanum_fraction": 0.7241379022598267,
"avg_line_length": 16.60714340209961,
"blob_id": "fb4d8abce5a54bf2f4048da2fe12cfa877f9a6a1",
"content_id": "a3fa955e2e7e29074fd5cdbaacf25c2ccc348aec",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 493,
"license_type": "no_license",
"max_line_length": 54,
"num_lines": 28,
"path": "/apps/website/views.py",
"repo_name": "jennuinecode/thejennuinelife_django",
"src_encoding": "UTF-8",
"text": "from django.shortcuts import render, HttpResponse\n\n\ndef index(request):\n\n return render(request, 'website/index.html')\n\ndef about(request):\n\n return render(request, 'website/about.html')\n\n\ndef photography(request):\n\n return render(request, 'website/photography.html')\n\n\ndef design(request):\n\n return render(request, 'website/design.html')\n\ndef blog(request):\n\n return render(request, 'blog/index.html')\n\ndef contact(request):\n\n return render(request, 'website/contact.html')\n"
},
{
"alpha_fraction": 0.7608200311660767,
"alphanum_fraction": 0.7653758525848389,
"avg_line_length": 20.950000762939453,
"blob_id": "4cdccd089a709b4e49887f86ab4d60b8a90ff0a8",
"content_id": "47d7275029572fcdaa43cc0b23c36c7b7c42f4a8",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 439,
"license_type": "no_license",
"max_line_length": 73,
"num_lines": 20,
"path": "/apps/blog/views.py",
"repo_name": "jennuinecode/thejennuinelife_django",
"src_encoding": "UTF-8",
"text": "from django.shortcuts import render\nfrom . models import Blog\n\ndef index(request):\n\n return render(request, 'blog/index.html')\n\n\ndef youcodegirl(request):\n\n return render(request, 'blog/posts/thegrind/youcodegirl.html')\n\ndef myphotographystory(request):\n\n return render(request, 'blog/posts/thegrind/myphotographystory.html')\n\n\ndef fitnessstorypt1(request):\n\n return render(request, 'blog/posts/thegrind/fitnessstorypt1.html')\n"
},
{
"alpha_fraction": 0.6988636255264282,
"alphanum_fraction": 0.7272727489471436,
"avg_line_length": 38.11111068725586,
"blob_id": "2547f227b7f61949160c6e75af2f011611b9fd74",
"content_id": "4d0910d1a09d5f96478627e359007491943ebb2a",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 352,
"license_type": "no_license",
"max_line_length": 58,
"num_lines": 9,
"path": "/apps/blog/models.py",
"repo_name": "jennuinecode/thejennuinelife_django",
"src_encoding": "UTF-8",
"text": "from __future__ import unicode_literals\nfrom django.db import models\n\nclass Blog(models.Model):\n title = models.CharField(max_length=100)\n category = models.CharField(max_length=100)\n entry = models.TextField(max_length = 4000)\n created_at = models.DateTimeField(auto_now_add = True)\n updated_at = models.DateTimeField(auto_now = True)\n"
},
{
"alpha_fraction": 0.47311827540397644,
"alphanum_fraction": 0.6774193644523621,
"avg_line_length": 14.5,
"blob_id": "23d4ded21432df1fbcb9d21ab3dab6ce27f257b6",
"content_id": "f3cc2ebdf5bcc37c1738371f0718e47952a7a8cc",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Text",
"length_bytes": 93,
"license_type": "no_license",
"max_line_length": 24,
"num_lines": 6,
"path": "/requirements.txt",
"repo_name": "jennuinecode/thejennuinelife_django",
"src_encoding": "UTF-8",
"text": "bcrypt==3.1.3\ncffi==1.9.1\nDjango==1.10.4\ndjango-extensions==1.7.6\npycparser==2.17\nsix==1.4.1\n"
},
{
"alpha_fraction": 0.6139053106307983,
"alphanum_fraction": 0.61834317445755,
"avg_line_length": 38.764705657958984,
"blob_id": "075181f06d6d94e2946aaad782a846a0adcf3392",
"content_id": "46dbff0154594b0ccc3497ef559682f3e53899cb",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 676,
"license_type": "no_license",
"max_line_length": 86,
"num_lines": 17,
"path": "/apps/blog/urls.py",
"repo_name": "jennuinecode/thejennuinelife_django",
"src_encoding": "UTF-8",
"text": "from django.conf.urls import url\nfrom . import views\n\napp_name = \"blog\"\n\nurlpatterns = [\n url(r'^$', views.index, name=\"index\"),\n url(r'^youcodegirl$', views.youcodegirl, name=\"youcodegirl\"),\n url(r'^fitnessstorypt1$', views.fitnessstorypt1, name=\"fitnessstorypt1\"),\n url(r'^myphotographystory$', views.myphotographystory, name=\"myphotographystory\"),\n # url(r'^join/(?P<id>\\d+)$', views.join, name=\"join\"),\n # url(r'^edit/(?P<id>\\d+)$', views.edit, name=\"edit\"),\n # url(r'^drop/(?P<id>\\d+)$', views.drop, name=\"drop\"),\n # url(r'^remove/(?P<id>\\d+)$', views.remove, name=\"remove\"),\n # url(r'^confirm/(?P<id>\\d+)$', views.confirm, name=\"confirm\"),\n\n]\n"
}
] | 5 |
lledinh/ENSEEIHT-Cours-Python
|
https://github.com/lledinh/ENSEEIHT-Cours-Python
|
94fc5de1639472abc25fb084d998462d0de63c57
|
875aa4057665b66fb2882170c04f442911bcadc3
|
6e6468c0475cadeecc5f19bfef2cbf4bb4ef1e75
|
refs/heads/main
| 2022-12-20T11:57:57.821170 | 2020-10-27T09:34:09 | 2020-10-27T09:34:09 | 307,645,241 | 0 | 1 | null | null | null | null | null |
[
{
"alpha_fraction": 0.8260869383811951,
"alphanum_fraction": 0.8260869383811951,
"avg_line_length": 23,
"blob_id": "65ce051d9f06f2040c5ec0febf972c64d609dd66",
"content_id": "8127f6651dee43482191bf7381e95069aa8c319c",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Markdown",
"length_bytes": 23,
"license_type": "no_license",
"max_line_length": 23,
"num_lines": 1,
"path": "/README.md",
"repo_name": "lledinh/ENSEEIHT-Cours-Python",
"src_encoding": "UTF-8",
"text": "# ENSEEIHT-Cours-Python"
},
{
"alpha_fraction": 0.5619524121284485,
"alphanum_fraction": 0.5744680762290955,
"avg_line_length": 27.33333396911621,
"blob_id": "e3208f5877601f76eb93f3cfeee868ff97c87b12",
"content_id": "5de480af187a18164cd2aa4fe25a729689ee6c5d",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 4027,
"license_type": "no_license",
"max_line_length": 78,
"num_lines": 141,
"path": "/jeu_devin.py",
"repo_name": "lledinh/ENSEEIHT-Cours-Python",
"src_encoding": "UTF-8",
"text": "import random\n\ndef jeu_du_devin_utilisateur():\n ''' R0 Jouer au jeu du devin - l'utilisateur devine '''\n\n # R1 Comment jouer au jeu devin\n #\n # Choisir un nombre compris entre 1 et 999\n # solution: out\n # Faire deviner ce nombre\n # solution: in\n # nombre_essai: out\n # Féliciter le joueur\n # nombre_essai: in\n\n # R2 Comment Faire deviner ce nombre\n #\n # trouvé = false\n # nombre_essai = 0\n # while non trouvé:\n # Demander la proposition de l'utilisateur\n # proposition: out\n # Incrémenter le nombre d'essai\n # nombre_essai: inout\n # Donner un indice \n # solution: in\n # proposition: in\n\n\n # Choisir un nombre compris entre 1 et 999\n nombre_devin = random.randint(1, 999)\n nombre_joueur = None\n nombre_essai = 0\n\n \n while nombre_devin != nombre_joueur:\n # Mettre à jour le nombre d'essai\n nombre_essai += 1\n\n # Demander un nombre au joueur\n nombre_joueur = int(input(\"Propostion {0}\\n\".format(nombre_essai)))\n\n if (nombre_joueur > nombre_devin):\n print(\"Le nombre est plus petit.\")\n elif (nombre_joueur < nombre_devin):\n print(\"Le nombre est plus grand.\")\n print()\n # Afficher le nombre d'essais\n print(\"Vous avez trouvé en {0} essai(s)!\".format(nombre_essai))\n\n \ndef jeu_du_devin_machine():\n ''' R0 Jouer au jeu du devin - l'utilisateur devine '''\n\n # R1 Comment jouer au jeu devin - la machine devine\n #\n # Faire choisir un nombre compris entre 1 et 999\n #\n # Deviner ce nombre\n # nombre_essai: out\n #\n # Indiquer le nombre d'essai.\n # nombre_essai: in\n\n # R2 Comment Deviner ce nombre\n #\n # trouvé = false\n # nombre_essai = 0\n # while non trouvé:\n # Choisir un nombre\n # proposition: out\n # Incrémenter le nombre d'essai\n # nombre_essai: inout\n # Récupérer un indice \n # proposition: in\n # trouve: out\n\n # R3 Comment Choisir un nombre\n # nombre_ordi = borne_inf + (borne_sup - borne_inf) // 2\n # Mettre à jour l'intervalle définissant où est situé le nombre\n # \n\n # R4 Comment Mettre à jour l'intervalle définissant où est situé le nombre\n # borne_sup -= (borne_sup - borne_inf) // 2\n # borne_inf += (borne_sup - borne_inf) // 2\n\n nombre_choisi = False\n\n while not nombre_choisi:\n reponse = input(\"Avez-vous choisi un nombre? (o/n)\\n\")\n nombre_choisi = reponse in ('o', 'O', 'y', 'Y')\n\n if not nombre_choisi:\n print (\"J'attends\")\n\n nombre_essai = 0\n borne_sup = 1000\n borne_inf = 0\n nombre_trouve = False\n\n while not nombre_trouve:\n # Choisir un nombre\n nombre_ordi = borne_inf + (borne_sup - borne_inf) // 2\n # Incrémenter le nombre d'essai\n nombre_essai += 1\n\n print(\"Je choisis {0}.\".format(nombre_ordi))\n reponse = input(\"Trop grand, trop petit, trouvé?\\n\")\n\n # Récupérer un indice \n if reponse == 'g':\n # Mettre à jour l'intervalle définissant où est situé le nombre\n borne_sup -= (borne_sup - borne_inf) // 2\n elif reponse == 'p':\n # Mettre à jour l'intervalle définissant où est situé le nombre\n borne_inf += (borne_sup - borne_inf) // 2\n elif reponse == 't':\n nombre_trouve = True\n\n # Afficher le nombre d'essais\n print(\"J'ai trouvé en {0} essai(s)!\".format(nombre_essai))\n\n\nif __name__ == \"__main__\":\n\n continer_jeu = True\n\n while continer_jeu:\n print (\"1- L'ordinateur choisit un nombre et vous le devinez\")\n print (\"2- Vous choissez un nombre et l'ordinateur le devine\")\n print (\"0- Quitter le programme\")\n\n choix = int(input())\n\n if choix == 1:\n jeu_du_devin_utilisateur()\n elif choix == 2:\n jeu_du_devin_machine()\n else:\n print (\"Au revoir...\")\n continer_jeu = False\n"
}
] | 2 |
microiva178/django_learn
|
https://github.com/microiva178/django_learn
|
b9761d119dfafca310e20230f8b58006b0c3b102
|
29002200d47159c3c50b126bc2be738c75de94bf
|
6ec97cba6f97e716bca0500fe278e8f2743d4dad
|
refs/heads/master
| 2021-02-28T17:34:48.786509 | 2020-03-07T23:24:28 | 2020-03-07T23:24:28 | 245,717,966 | 0 | 0 | null | null | null | null | null |
[
{
"alpha_fraction": 0.6005747318267822,
"alphanum_fraction": 0.6005747318267822,
"avg_line_length": 28.08333396911621,
"blob_id": "697d24bb3e24b69efcf19a259b44fa54e233e321",
"content_id": "515030f1eb6e5916c21c87b80de027ebbd08d9a9",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 348,
"license_type": "no_license",
"max_line_length": 60,
"num_lines": 12,
"path": "/fakeindex/page/views.py",
"repo_name": "microiva178/django_learn",
"src_encoding": "UTF-8",
"text": "from django.shortcuts import render\nfrom .forms import testform\n\ndef page(request):\n if request.method == \"POST\":\n form = testform(request.POST)\n if form.is_valid():\n post = form.save(commit = False)\n post.save()\n else:\n form = testform()\n return render(request, 'page/page.html', {'form': form})"
},
{
"alpha_fraction": 0.6839080452919006,
"alphanum_fraction": 0.6839080452919006,
"avg_line_length": 20.875,
"blob_id": "b4cb55f42d0ff23b5782d6edba664ce58ee3c7f5",
"content_id": "4093465819f2ad7a93661b71df530bacf6f95f8a",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 174,
"license_type": "no_license",
"max_line_length": 46,
"num_lines": 8,
"path": "/fakeindex/page/forms.py",
"repo_name": "microiva178/django_learn",
"src_encoding": "UTF-8",
"text": "from django import forms\nfrom .models import fortest\n\nclass testform(forms.ModelForm):\n\n class Meta:\n model = fortest\n fields = ('testlogin', 'testpassword')"
},
{
"alpha_fraction": 0.7077922224998474,
"alphanum_fraction": 0.7142857313156128,
"avg_line_length": 29.799999237060547,
"blob_id": "86f84cc9a603ec186eb2718282edff16bb79d52c",
"content_id": "4a251de8b84f3f08fbc3f3720e983cf54a6e1c00",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 154,
"license_type": "no_license",
"max_line_length": 47,
"num_lines": 5,
"path": "/chatroom/venv/bin/django-admin.py",
"repo_name": "microiva178/django_learn",
"src_encoding": "UTF-8",
"text": "#!/home/userf/Desktop/chattest/venv/bin/python3\nfrom django.core import management\n\nif __name__ == \"__main__\":\n management.execute_from_command_line()\n"
},
{
"alpha_fraction": 0.6366197466850281,
"alphanum_fraction": 0.6366197466850281,
"avg_line_length": 28.58333396911621,
"blob_id": "16990c1626bdf10c83fdb67e6c80165d627ba0ab",
"content_id": "12f7c77a6d03b93ee4653a726a81a45dc4c64473",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 710,
"license_type": "no_license",
"max_line_length": 79,
"num_lines": 24,
"path": "/chatroom/page/views.py",
"repo_name": "microiva178/django_learn",
"src_encoding": "UTF-8",
"text": "from django.shortcuts import render\nfrom .models import Post\nfrom .forms import PostForm\nfrom django.shortcuts import redirect\n\ndef page(request):\n posts = Post.objects.all()\n return render(request, 'page/page.html', {'posts': posts})\n\ndef message(request):\n posts = Post.objects.all()\n if request.method == \"POST\":\n form = PostForm(request.POST)\n if form.is_valid():\n post = form.save(commit=False)\n post.author = request.user\n post.save()\n return redirect('message')\n else:\n form = PostForm()\n return render(request, 'page/message.html', {'form': form, 'posts': posts})\n\ndef REDIRECT(request):\n return redirect('message')\n"
},
{
"alpha_fraction": 0.6526315808296204,
"alphanum_fraction": 0.6526315808296204,
"avg_line_length": 22.75,
"blob_id": "4227e3e5dfb7c603714431eb8de3ff73fe083463",
"content_id": "2bedf5d7e1b3579f10173e5d90fc516ef7c77b54",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 190,
"license_type": "no_license",
"max_line_length": 48,
"num_lines": 8,
"path": "/fakeindex/page/urls.py",
"repo_name": "microiva178/django_learn",
"src_encoding": "UTF-8",
"text": "from django.urls import path\nfrom . import views\n#from django.conf.urls import url\n\nurlpatterns = [\n path('', views.page, name='page'),\n #url('', views.Page.as_view(), name='Page'),\n]\n"
},
{
"alpha_fraction": 0.7051281929016113,
"alphanum_fraction": 0.7222222089767456,
"avg_line_length": 25,
"blob_id": "ca11ceb16413d10ea74a07965474870dd5a2b741",
"content_id": "22a76cba248bfd8eb8806627983c65e59917e7dc",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 234,
"license_type": "no_license",
"max_line_length": 50,
"num_lines": 9,
"path": "/fakeindex/page/models.py",
"repo_name": "microiva178/django_learn",
"src_encoding": "UTF-8",
"text": "from django.db import models\nfrom django.conf import settings\n\nclass fortest(models.Model):\n testlogin = models.CharField(max_length=30)\n testpassword = models.CharField(max_length=30)\n\n def saveit(self):\n self.save()\n"
},
{
"alpha_fraction": 0.8333333134651184,
"alphanum_fraction": 0.8333333134651184,
"avg_line_length": 21.75,
"blob_id": "41539e5d24ed3e233456df6a10caeaa07dd864c6",
"content_id": "f3fccb804e279f1bfccf2aea54b9b57d5844fb5a",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 90,
"license_type": "no_license",
"max_line_length": 32,
"num_lines": 4,
"path": "/fakeindex/page/admin.py",
"repo_name": "microiva178/django_learn",
"src_encoding": "UTF-8",
"text": "from django.contrib import admin\nfrom .models import fortest\n\nadmin.site.register(fortest)"
},
{
"alpha_fraction": 0.6574074029922485,
"alphanum_fraction": 0.6574074029922485,
"avg_line_length": 23.11111068725586,
"blob_id": "3f7a5cfc686aa964c11a2f29e026e0f61c4cd6bd",
"content_id": "df4f0c20075075754bbd542c01e814d2a34d9d65",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 216,
"license_type": "no_license",
"max_line_length": 62,
"num_lines": 9,
"path": "/chatroom/page/urls.py",
"repo_name": "microiva178/django_learn",
"src_encoding": "UTF-8",
"text": "from django.urls import path\nfrom . import views\n\nurlpatterns = [\n #path('', views.page, name='page'),\n path('', views.message, name='message'),\n path('accounts/profile/', views.REDIRECT, name='REDIRECT')\n\n]"
}
] | 8 |
sthanhng/testci
|
https://github.com/sthanhng/testci
|
f405c62ebb6eff4ab674a50ab5807f5683b180cb
|
71641e11fa55378a6c0a865df2d906d6efa0c45b
|
8d1d07d70d914a26aa07a04e00cb8a6f062e86e1
|
refs/heads/master
| 2022-11-22T05:39:22.248796 | 2020-07-14T03:49:51 | 2020-07-14T03:49:51 | 279,468,577 | 0 | 0 | null | null | null | null | null |
[
{
"alpha_fraction": 0.512499988079071,
"alphanum_fraction": 0.512499988079071,
"avg_line_length": 15,
"blob_id": "cc3c43e23077a10899b432a08eced83c753c2042",
"content_id": "64e36a0681aa04ef4b893b9163991d6863bd0d6e",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 80,
"license_type": "no_license",
"max_line_length": 30,
"num_lines": 5,
"path": "/main.py",
"repo_name": "sthanhng/testci",
"src_encoding": "UTF-8",
"text": "def log_msg(msg):\n \"\"\"\n Print a msg to the console\n \"\"\"\n print(msg)\n"
},
{
"alpha_fraction": 0.7692307829856873,
"alphanum_fraction": 0.7863247990608215,
"avg_line_length": 38,
"blob_id": "7ff5dc319170119959ab8d7c5b3cf6883abb79a0",
"content_id": "99a9f00deb0235da3240c23acf0c3f6dc2404712",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Markdown",
"length_bytes": 117,
"license_type": "no_license",
"max_line_length": 90,
"num_lines": 3,
"path": "/README.md",
"repo_name": "sthanhng/testci",
"src_encoding": "UTF-8",
"text": "# CI with GitHub Actions\n\n\n"
}
] | 2 |
refl3ction/exchanges-apis-consumer
|
https://github.com/refl3ction/exchanges-apis-consumer
|
5868e27249957431693a7b264efa37ffdd9da6f1
|
a042d1a90d69e7cd678a0eca9a5751987cbec030
|
24add646078ff3f9df5114b35a3a1e715eadbb37
|
refs/heads/master
| 2022-03-02T02:54:32.127982 | 2018-06-21T17:56:16 | 2018-06-21T17:56:16 | null | 0 | 0 | null | null | null | null | null |
[
{
"alpha_fraction": 0.742514967918396,
"alphanum_fraction": 0.742514967918396,
"avg_line_length": 19.875,
"blob_id": "7e63f485117a3a28462ed8188c770ed7d4c82009",
"content_id": "846765fa036ce117ca9ec31003d6d7bd5b847991",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Markdown",
"length_bytes": 167,
"license_type": "no_license",
"max_line_length": 83,
"num_lines": 8,
"path": "/README.md",
"repo_name": "refl3ction/exchanges-apis-consumer",
"src_encoding": "UTF-8",
"text": "# exchanges-apis-consumer \nThis application calculates the increase of coin volumes negociation in exchanges \n\n### Running application \n\n```sh\n$ python main.py\n```\n"
},
{
"alpha_fraction": 0.5411859154701233,
"alphanum_fraction": 0.550845205783844,
"avg_line_length": 37.73958206176758,
"blob_id": "878c8e567ecfcc4b36027dcc524b383842cfc3e0",
"content_id": "156293aa67e9b0e33dfea5ac333601299d32ef55",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 3727,
"license_type": "no_license",
"max_line_length": 220,
"num_lines": 96,
"path": "/classes/CoinVolume.py",
"repo_name": "refl3ction/exchanges-apis-consumer",
"src_encoding": "UTF-8",
"text": "import requests\nimport time, datetime\n\nclass CoinVolume(object):\n\n def __init__(self, name=None):\n\n self.name = name\n self.lastId = 0\n self.initalPrice = 0\n self.initialSellVol = 0\n self.initialBuyVol = 0\n self.currentVol = 0\n\n self.soldVolume = 0\n self.soldPrice = 0\n self.soldBtcVol = 0\n self.soldPerc = 0\n self.soldPercIncrease = 0\n self.soldVolIncrease = 0\n\n self.boughtVolume = 0\n self.boughtPrice = 0\n self.boughtBtcVol = 0\n self.boughtPerc = 0\n self.boughtPercIncrease = 0\n self.boughtVolIncrease = 0\n\n def initializeVars(self):\n orders = self.getOrderBook()\n\n self.soldPrice = orders[0]['Price']\n self.boughtPrice = orders[0]['Price']\n self.lastId = orders[0]['Id']\n for order in orders:\n if order['OrderType'] == 'BUY':\n self.boughtVolume += order['Quantity']\n self.boughtBtcVol += order['Total']\n\n else:\n self.soldVolume += order['Quantity']\n self.soldBtcVol += order['Total']\n\n self.initialSellVol = self.soldVolume\n self.initialBuyVol = self.boughtVolume\n\n def getOrderBook(self):\n print('Getting Order Book from: ' + self.name)\n return requests.get('https://bittrex.com/api/v1.1/public/getmarkethistory?market=' + self.name).json()['result']\n \n\n def calculateVolume(self):\n \n orders = self.getOrderBook()\n st = datetime.datetime.fromtimestamp(time.time()).strftime('%d/%m/%Y %H:%M:%S')\n \n print('Request time: ' + str(st) + '\\n')\n\n _sellVol = self.soldVolume\n _buyVol = self.boughtVolume\n _lastId = orders[0]['Id']\n for order in orders:\n if int(order['Id']) <= self.lastId:\n break\n if order['OrderType'] == 'BUY':\n self.boughtVolume += order['Quantity']\n self.boughtBtcVol += order['Total']\n print('New buy order completed: ' + str(order['Quantity']) + ' ' + self.name)\n\n else:\n self.soldVolume += order['Quantity']\n self.soldBtcVol += order['Total']\n print('New sell order completed: ' + str(order['Quantity']) + ' ' + self.name)\n\n self.lastId = _lastId\n\n #Calculate perc and vol increased since last request\n self.soldPerc, self.soldVolIncrease = self.calculateIncrease(self.soldVolume, _sellVol)\n self.boughtPerc, self.boughtVolIncrease = self.calculateIncrease(self.boughtVolume, _buyVol)\n\n self.soldPercIncrease, _ = self.calculateIncrease(self.soldVolume, self.initialSellVol)\n self.boughtPercIncrease, _ = self.calculateIncrease(self.boughtVolume, self.initialBuyVol)\n\n\n def calculateIncrease(self, newValue, oldValue):\n _increase = newValue - oldValue\n return (_increase / oldValue) * 100, _increase \n\n\n def printLog(self):\n print('############################ {} ##########################################'.format(self.name))\n print(' Volume || Change || Volume Change || Acumulated Change')\n print('sold: {0:.2f}'.format(self.soldVolume) + ' || ' +str(round(self.soldPerc, 2)) +'% || ' +str(round(self.soldVolIncrease, 2)) +' || ' +str(round(self.soldPercIncrease, 2)) + '%')\n print('Bought: {0:.2f}'.format(self.boughtVolume) + ' || ' +str(round(self.boughtPerc, 2)) +'% || ' +str(round(self.boughtVolIncrease, 2)) +' || ' +str(round(self.boughtPercIncrease, 2)) + '%')\n\n print('############################ END ##########################################')\n "
},
{
"alpha_fraction": 0.5907859206199646,
"alphanum_fraction": 0.5962059497833252,
"avg_line_length": 17.5,
"blob_id": "784bcc6fc56342130ace399f6f7d610a5895f140",
"content_id": "8abd0febcb94c5568369313381608437b50bf21e",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 369,
"license_type": "no_license",
"max_line_length": 41,
"num_lines": 20,
"path": "/app/app.py",
"repo_name": "refl3ction/exchanges-apis-consumer",
"src_encoding": "UTF-8",
"text": "from classes.CoinVolume import CoinVolume\nimport time\nprint(__name__)\nprint('Starting Script...')\n\ndef main(coins):\n\n for coin in coins:\n coin.initializeVars()\n while True:\n for coin in coins:\n coin.calculateVolume()\n coin.printLog()\n time.sleep(15) \n\n\nmain([\n CoinVolume('BTC-XVG'), \n CoinVolume('BTC-ETH')\n ])"
}
] | 3 |
pkoluguri/memorize-trigno-table-gui
|
https://github.com/pkoluguri/memorize-trigno-table-gui
|
01a0c75fe5af35a9e0778540a0811789d7ee9ce8
|
dc723a2adea22358f434cc2ecb5d4e3512fa7e24
|
5a0165a37f9c137b982767595b3a6dac223ad03b
|
refs/heads/main
| 2023-08-03T13:24:09.017096 | 2021-10-01T16:37:33 | 2021-10-01T16:37:33 | 412,535,657 | 0 | 0 | null | null | null | null | null |
[
{
"alpha_fraction": 0.5120745301246643,
"alphanum_fraction": 0.5689294338226318,
"avg_line_length": 31.496814727783203,
"blob_id": "5240ee5d6535db8b26327b9b2819b188082b1459",
"content_id": "0cc87fca0319bd7ab4c5f1f0af4dd02d28700c3f",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 5259,
"license_type": "no_license",
"max_line_length": 163,
"num_lines": 157,
"path": "/main.py",
"repo_name": "pkoluguri/memorize-trigno-table-gui",
"src_encoding": "UTF-8",
"text": "import tkinter as tk\r\nfrom tkinter.constants import CENTER, DISABLED, GROOVE, PIESLICE, SUNKEN, TOP\r\nimport random\r\n\r\nwindow = tk.Tk()\r\nsin0 = \"0\"\r\nsin30 = \"1/2\"\r\nsin45 = \"1/root2\"\r\nsin60 = \"root3/2\"\r\nsin90 = \"1\"\r\n\r\ncos0=\"1\"\r\ncos30=\"root3/2\"\r\ncos45=\"1/root2\"\r\ncos60=\"1/2\"\r\ncos90=\"0\"\r\n\r\ntan0=\"0\"\r\ntan30=\"1/root3\"\r\ntan45=\"1\"\r\ntan60=\"root3\"\r\ntan90=\"not defined\"\r\n\r\ncosec0=\"Not defined\"\r\ncosec30=\"2\"\r\ncosec45=\"root2\"\r\ncosec60=\"2/root3\"\r\ncosec90=\"1\"\r\n\r\nsec0=\"1\"\r\nsec30=\"2/root3\"\r\nsec45=\"root2\"\r\nsec60=\"2\"\r\nsec90=\"Not defined\"\r\n\r\ncot0=\"Not defined\"\r\ncot30=\"root3\"\r\ncot45=\"1\"\r\ncot60=\"1/root3\"\r\ncot90=\"0\"\r\n\r\nvalues_w = []\r\nentrys_w=[]\r\ncorrect_values= [[sin0,sin30,sin45,sin60,sin90],\r\n [cos0,cos30,cos45,cos60,cos90],\r\n [tan0,tan30,tan45,tan60,tan90],\r\n [cosec0,cosec30,cosec45,cosec60,cosec90],\r\n [sec0,sec30,sec45,sec60,sec90],\r\n [cot0,cot30,cot45,cot60,cot90] ]\r\n\r\nclass Table:\r\n def __init__(self,root):\r\n for i in range(total_rows):\r\n if i >0:\r\n values_w.append(values)\r\n values=[]\r\n for j in range(total_columns):\r\n text = tk.StringVar()\r\n self.e = tk.Entry(root, width=15, fg='black',\r\n font=('Arial',16,'bold'),textvariable=text)\r\n self.e.grid(row=i+1, column=j)\r\n if i == 0 or j == 0:\r\n self.e.insert(tk.END, lst[i][j])\r\n self.e.configure(state='readonly')\r\n self.e.insert(tk.END, lst[i][j])\r\n values.append(text)\r\n self.btn = tk.Button(root,width=20,text=\"Submit\",relief=SUNKEN,background=\"tomato\",foreground=\"white\",command=submit)\r\n self.btn.grid(row=total_rows+1,column=2)\r\n self.btn = tk.Button(root,width=20,text=\"Refresh\",relief=SUNKEN,background=\"tomato\",foreground=\"white\",command=refresh)\r\n self.btn.grid(row=total_rows+1,column=4)\r\n self.btn = tk.Button(root,width=20,text=\"Fill all\",relief=SUNKEN,background=\"tomato\",foreground=\"white\",command=fill_all)\r\n self.btn.grid(row=total_rows+2,column=3)\r\n\r\ndef set_fill_lst():\r\n lst = [('ratios',0,30,45,60,90)]\r\n lst.append(('sin',sin0,sin30,sin45,sin60,sin90))\r\n lst.append(('cos',cos0,cos30,cos45,cos60,cos90))\r\n lst.append(('tan',tan0,tan30,tan45,tan60,tan90))\r\n lst.append(('cosec',cosec0,cosec30,cosec45,cosec60,cosec90))\r\n lst.append(('sec',sec0,sec30,sec45,sec60,sec90))\r\n lst.append(('cot',cot0,cot30,cot45,cot60,cot90))\r\n return lst\r\n\r\ndef fill_all():\r\n global t\r\n global lst\r\n global values_w\r\n t.e.grid_remove()\r\n lst=set_fill_lst()\r\n values_w=[]\r\n t=Table(window)\r\n \r\n\r\ndef refresh():\r\n global t\r\n global lst\r\n global values_w\r\n t.e.grid_remove()\r\n lst = load_random_values()\r\n values_w=[]\r\n t=Table(window)\r\n\r\ndef submit():\r\n global values_w\r\n global total_rows\r\n global total_columns\r\n not_correct = False\r\n for i in range(total_rows-1):\r\n for j in range(total_columns):\r\n if j > 0 and i > 0:\r\n if values_w[i][j].get() != \"\":\r\n if values_w[i][j].get().lower().replace(\" \",\"\") == correct_values[i-1][j-1].lower().replace(\" \",\"\"):\r\n pass\r\n else:\r\n text.configure(text=\"Some of the values are incorrect\",foreground=\"red\")\r\n not_correct = True\r\n return\r\n else:\r\n text.configure(text=\"Please Fill all the values\",foreground=\"red\")\r\n not_correct = True\r\n return\r\n if not not_correct: \r\n text.configure(text=\"Every Value is Correct!\",foreground=\"green\")\r\n refresh() \r\n\r\ndef load_random_values():\r\n lst = [('ratios',0,30,45,60,90)]\r\n lst.append(('sin',random.choice(['',sin0]),random.choice(['',sin30]),random.choice(['',sin45]),random.choice(['',sin60]),random.choice(['',sin90])))\r\n lst.append(('cos',random.choice(['',cos0]),random.choice(['',cos30]),random.choice(['',cos45]),random.choice(['',cos60]),random.choice(['',cos90])))\r\n lst.append(('tan',random.choice(['',tan0]),random.choice(['',tan30]),random.choice(['',tan45]),random.choice(['',tan60]),random.choice(['',tan90])))\r\n lst.append(('cosec',random.choice(['',cosec0]),random.choice(['',cosec30]),random.choice(['',cosec45]),random.choice(['',cosec60]),random.choice(['',cosec90])))\r\n lst.append(('sec',random.choice(['',sec0]),random.choice(['',sec30]),random.choice(['',sec45]),random.choice(['',sec60]),random.choice(['',sec90])))\r\n lst.append(('cot',random.choice(['',cot0]),random.choice(['',cot30]),random.choice(['',cot45]),random.choice(['',cot60]),random.choice(['',cot90])))\r\n return lst\r\n\r\nlst = load_random_values()\r\n\r\n# find total number of rows and\r\n# columns in list\r\ntotal_rows = len(lst)\r\ntotal_columns = len(lst[0])\r\n\r\nlst = load_random_values()\r\n\r\n# find total number of rows and\r\n# columns in list\r\ntotal_rows = len(lst)\r\ntotal_columns = len(lst[0])\r\n\r\ntext=tk.Label(window,text=\"\")\r\ntext.grid(column=3,row=total_rows+1)\r\n \r\n# create root window\r\nt=Table(window)\r\n\r\n\r\nwindow.mainloop()\r\n"
}
] | 1 |
alireza-mht/MNIST-dataset
|
https://github.com/alireza-mht/MNIST-dataset
|
085711d069d5c508372073de765337a0a2df2986
|
d4bb59693ca76d9cc1ef38151d47f4f259851471
|
8c47fb20b0e76b4f34f02282fa050d687c05b150
|
refs/heads/master
| 2020-04-19T02:44:42.861540 | 2019-02-20T16:23:46 | 2019-02-20T16:23:46 | 167,913,441 | 0 | 0 | null | null | null | null | null |
[
{
"alpha_fraction": 0.6304434537887573,
"alphanum_fraction": 0.6550139784812927,
"avg_line_length": 34.25352096557617,
"blob_id": "673ee69c9f612882ea8eb54eabacc86dc9daa969",
"content_id": "9b0ce1cb3203a697df71661649587ec0b2a9e0ef",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 5006,
"license_type": "no_license",
"max_line_length": 114,
"num_lines": 142,
"path": "/cnn.py",
"repo_name": "alireza-mht/MNIST-dataset",
"src_encoding": "UTF-8",
"text": "import keras\nimport numpy as np\n\nimport matplotlib.pyplot as plt\nfrom keras import optimizers, Sequential\nfrom keras.layers import Conv2D, MaxPooling2D, Dropout, Flatten, Dense\nfrom sklearn.metrics import accuracy_score,confusion_matrix, f1_score, recall_score, precision_score\nfrom keras.models import model_from_json\nimport sys\n\nfrom keras import metrics\nfrom keras.utils import np_utils\nfrom sklearn.model_selection import train_test_split\nfrom mnistloader.mnist_loader import MNIST\n\nold_stdout = sys.stdout\nlog_file = open(\"summarycnn.log\",\"w\")\nsys.stdout = log_file\n\nprint(\"cnn Log :\\n\\n\")\n\n\n# Load MNIST Data\ndata = MNIST('./mnistloader/dataset/')\n\nimg_tra, labels_tra = data.load_training()\ntra_img = np.array(img_tra)\ntra_labels = np.array(labels_tra)\n\nimg_te, labels_te = data.load_testing()\nte_img = np.array(img_te)\nte_labels = np.array(labels_te)\n\n\n#Features\nX = tra_img\nX_test = te_img\n\n#Labels\ny = tra_labels\ny_test = te_labels\n\n\nb = np.reshape(X,47040000)\nX = (np.reshape(b, (60000, 28, 28,1))).astype('float32')\n\ns = np.reshape(X_test,7840000)\nX_test = (np.reshape(s, (10000, 28, 28,1))).astype('float32')\n\n# Now each image rows and columns are of 28x28 matrix type.\nimg_rows, img_columns = 28, 28\n#\n# # Transform training and testing data to 10 classes in range [0,classes] ; num. of classes = 0 to 9 = 10 classes\n# total_classes = 10 # 0 to 9 labels\ny = np_utils.to_categorical(y, 10)\ny_test = np_utils.to_categorical(y_test, 10)\ninput_shape = (img_rows, img_columns, 1)\n ####################################For Creating the Model##########################################\n # model = Sequential()\n # #convolutional layer with rectified linear unit activation\n # model.add(Conv2D(32, kernel_size=(3, 3),\n # activation='relu',\n # input_shape=input_shape))\n # #32 convolution filters used each of size 3x3\n # #again\n # model.add(Conv2D(64, (3, 3), activation='relu'))\n # #64 convolution filters used each of size 3x3\n # #choose the best features via pooling\n # model.add(MaxPooling2D(pool_size=(2, 2)))\n # #randomly turn neurons on and off to improve convergence\n # model.add(Dropout(0.25))\n # #flatten since too many dimensions, we only want a classification output\n # model.add(Flatten())\n # #fully connected to get all relevant data\n # model.add(Dense(128, activation='relu'))\n # #one more dropout for convergence' sake :)\n # model.add(Dropout(0.5))\n # #output a softmax to squash the matrix into output probabilities\n # model.add(Dense(10, activation='softmax'))\n # # Defing and compile the SGD optimizer and CNN model\n # print('\\n Compiling model...')\n # #Adaptive learning rate (adaDelta) is a popular form of gradient descent rivaled only by adam and adagrad\n # #categorical ce since we have multiple classes (10)\n # model.compile(loss=keras.losses.categorical_crossentropy,\n # optimizer=keras.optimizers.Adadelta(),\n # metrics=['accuracy'])\n\n # batch_size = 128\n # num_epoch = 10\n # #model training\n # model_log = model.fit(X, y,\n # batch_size=batch_size,\n # epochs=num_epoch,\n # verbose=1,\n # validation_data=(X_test, y_test))\n #Save the model\n #serialize model to JSON\n # model_digit_json = model.to_json()\n # with open(\"model_digit.json\", \"w\") as json_file:\n # json_file.write(model_digit_json)\n # # serialize weights to HDF5\n # model.save_weights(\"model_digit.h5\")\n # print(\"Saved model to disk\")\n #\n\n# Model reconstruction from JSON file\nwith open('model_digit.json', 'r') as f:\n model = model_from_json(f.read())\n\n# Load weights into the new model\nmodel.load_weights('model_digit.h5')\nmodel.compile(loss=keras.losses.categorical_crossentropy,\n optimizer=keras.optimizers.Adadelta(),\n metrics=['accuracy'])\n\n\n#test data accuracy\nscore = model.evaluate(X_test, y_test, verbose=0)\nprint('Test loss:', score[0])\nprint('Test accuracy:', score[1])\n\n#train data accuracy\nscore = model.evaluate(X, y, verbose=0)\nprint('Test loss:', score[0])\nprint('Test accuracy:', score[1])\n\n\n###############################################For precision recall and F1-score##########################\n # test_labels_pred = model.predict(X_test)\n # lastEpochPred = test_labels_pred[:,9]\n # lastEpochTest = y_test[:,9]\n # lastEpochPred = np_utils.to_categorical(lastEpochPred,0)\n # lastEpochTest = np_utils.to_categorical(lastEpochTest,0)\n #\n # acc = accuracy_score(lastEpochTest,lastEpochPred)\n # tprecision = precision_score(lastEpochTest,lastEpochPred,average='macro')\n # trecall = recall_score(lastEpochTest,lastEpochPred,average='macro')\n # tf1 = f1_score(lastEpochTest,lastEpochPred,average='macro')\n #\n # print('\\nPrecision of Classifier on Test Images: ',tprecision)\n # print('\\nRecall of Classifier on Test Images: ',trecall)\n # print('\\nF1 of Classifier on Test Images: ',tf1)\n"
},
{
"alpha_fraction": 0.7677809596061707,
"alphanum_fraction": 0.778805136680603,
"avg_line_length": 33.29268264770508,
"blob_id": "972fb465f7a100858965258db848a822d18df04f",
"content_id": "73ba39aae5f730fffd41fd7b4d049ba47c57872c",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 2812,
"license_type": "no_license",
"max_line_length": 129,
"num_lines": 82,
"path": "/dt.py",
"repo_name": "alireza-mht/MNIST-dataset",
"src_encoding": "UTF-8",
"text": "import sys\nfrom mnistloader.mnist_loader import MNIST\nfrom sklearn import tree\nfrom sklearn import metrics\nfrom sklearn.metrics import accuracy_score, confusion_matrix, f1_score, recall_score, precision_score\n\nfrom sklearn.model_selection import cross_val_score\nimport matplotlib as mpl\nmpl.use('TkAgg')\nimport matplotlib.pyplot as plt\n\n\nimport io\nfrom sklearn.externals.six import StringIO\nfrom matplotlib import style\nstyle.use('ggplot')\n\nold_stdout = sys.stdout\nlog_file = open(\"DTsummary.log\",\"w\")\nsys.stdout = log_file\n\n\nprint(\"Decision Tree Log :\\n\\n\")\n\nmndata = MNIST('./mnistloader/dataset/')\n\ntrainingImages, trainingLabels = mndata.load_training()\ntestImages, testLabels = mndata.load_testing()\n\ntrainingImagesCount = len(trainingImages)\ntestingImagesCount = len(testImages)\n\nclf = tree.DecisionTreeClassifier(criterion=\"gini\", max_depth=32, max_features=784)\n# clf = tree.DecisionTreeClassifier(criterion=\"entropy\", max_depth=32,min_samples_split=8, min_samples_leaf=8 , max_features=784)\n\nclf = clf.fit(trainingImages[:60000], trainingLabels[:60000])\npredictionResTrain = clf.predict(trainingImages)\n\naccuracy = accuracy_score(trainingLabels.tolist(), predictionResTrain)\nprecision = precision_score(trainingLabels.tolist(), predictionResTrain,average='macro')\nrecall = recall_score(trainingLabels.tolist(), predictionResTrain,average='macro')\nf1 = f1_score(trainingLabels.tolist(), predictionResTrain,average='macro')\nconf_mat = confusion_matrix(trainingLabels.tolist(), predictionResTrain)\n\n\nprint('\\nAccuracy of Classifier on Training Image Data: ',accuracy)\nprint('\\nPrecision of Classifier on Training Images: ',precision)\nprint('\\nRecall of Classifier on Training Images: ',recall)\nprint('\\nF1 of Classifier on Training Images: ',f1)\nprint('\\nConfusion Matrix: \\n',conf_mat)\n\nplt.matshow(conf_mat)\nplt.title('Confusion Matrix for Training Data')\nplt.colorbar()\nplt.ylabel('True label')\nplt.xlabel('Predicted label')\nplt.show()\n\n\npredictionRes = clf.predict(testImages)\n\nacc = accuracy_score(testLabels.tolist(),predictionRes)\ntprecision = precision_score(testLabels.tolist(),predictionRes,average='macro')\ntrecall = recall_score(testLabels.tolist(),predictionRes,average='macro')\ntf1 = f1_score(testLabels.tolist(),predictionRes,average='macro')\nconf_mat_test = confusion_matrix(testLabels.tolist(),predictionRes)\n\nprint('\\nAccuracy of Classifier on Test Images: ',acc)\nprint('\\nPrecision of Classifier on Test Images: ',tprecision)\nprint('\\nRecall of Classifier on Test Images: ',trecall)\nprint('\\nF1 of Classifier on Test Images: ',tf1)\nprint('\\nConfusion Matrix for Test Data: \\n',conf_mat_test)\n\nplt.matshow(conf_mat_test)\nplt.title('Confusion Matrix for Test Data')\nplt.colorbar()\nplt.ylabel('True label')\nplt.xlabel('Predicted label')\nplt.show()\n\nsys.stdout = old_stdout\nlog_file.close()\n"
},
{
"alpha_fraction": 0.7285068035125732,
"alphanum_fraction": 0.7326545715332031,
"avg_line_length": 29.837209701538086,
"blob_id": "d0a909559b902a89befeeb5fc28257ad5ce95383",
"content_id": "e829e6629ec70e8f06ed00f54975254b11a9b3d4",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 2652,
"license_type": "no_license",
"max_line_length": 100,
"num_lines": 86,
"path": "/svm.py",
"repo_name": "alireza-mht/MNIST-dataset",
"src_encoding": "UTF-8",
"text": "import sys\nimport numpy as np\nimport pickle\n\nimport matplotlib as mpl\nmpl.use('TkAgg')\n\nfrom sklearn import model_selection, svm, preprocessing\nfrom sklearn.metrics import accuracy_score,confusion_matrix, f1_score, recall_score, precision_score\nfrom mnistloader.mnist_loader import MNIST\nimport matplotlib.pyplot as plt\nfrom matplotlib import style\nstyle.use('ggplot')\n\nold_stdout = sys.stdout\nlog_file = open(\"SVMsummary.log\",\"w\")\nsys.stdout = log_file\nprint(\"SVM Log :\\n\\n\")\ndata = MNIST('./mnistloader/dataset/')\n\nimg_train, labels_train = data.load_training()\ntrain_img = np.array(img_train)\ntrain_labels = np.array(labels_train)\nimg_test, labels_test = data.load_testing()\ntest_img = np.array(img_test)\ntest_labels = np.array(labels_test)\n\nX = train_img\ny = train_labels\n\nclf = svm.SVC(gamma=0.1, kernel='poly')\nclf.fit(X,y)\n\nwith open('MNIST_SVM.pickle','wb') as f:\n\tpickle.dump(clf, f)\n\npickle_in = open('MNIST_SVM.pickle','rb')\nclf = pickle.load(pickle_in)\n\n# acc = clf.score(X,y)\n# y_pred = clf.predict(X)\n# accuracy = accuracy_score(y, y_pred)\n# precision = precision_score(y, y_pred,average='macro')\n# recall = recall_score(y, y_pred,average='macro')\n# f1 = f1_score(y, y_pred,average='macro')\n# conf_mat = confusion_matrix(y,y_pred)\n#\n# print('\\nSVM Trained Classifier Accuracy: ',acc)\n# print('\\nAccuracy of Classifier on Training Images: ',accuracy)\n# print('\\nPrecision of Classifier on Training Images: ',precision)\n# print('\\nRecall of Classifier on Training Images: ',recall)\n# print('\\nF1 of Classifier on Training Images: ',f1)\n# print('\\nConfusion Matrix: \\n',conf_mat)\n#\n# plt.matshow(conf_mat)\n# plt.title('Confusion Matrix for Training Data')\n# plt.colorbar()\n# plt.ylabel('True label')\n# plt.xlabel('Predicted label')\n# plt.show()\n\ntest_labels_pred = clf.predict(test_img)\n\nacc = accuracy_score(test_labels,test_labels_pred)\ntprecision = precision_score(test_labels,test_labels_pred,average='macro')\ntrecall = recall_score(test_labels,test_labels_pred,average='macro')\ntf1 = f1_score(test_labels,test_labels_pred,average='macro')\nconf_mat_test = confusion_matrix(test_labels,test_labels_pred)\n\nprint('\\nAccuracy of Classifier on Test Images: ',acc)\nprint('\\nPrecision of Classifier on Test Images: ',tprecision)\nprint('\\nRecall of Classifier on Test Images: ',trecall)\nprint('\\nF1 of Classifier on Test Images: ',tf1)\nprint('\\nConfusion Matrix for Test Data: \\n',conf_mat_test)\n\n# Plot Confusion Matrix for Test Data\nplt.matshow(conf_mat_test)\nplt.title('Confusion Matrix for Test Data')\nplt.colorbar()\nplt.ylabel('True label')\nplt.xlabel('Predicted label')\nplt.axis('off')\nplt.show()\n\nsys.stdout = old_stdout\nlog_file.close()\n"
},
{
"alpha_fraction": 0.824999988079071,
"alphanum_fraction": 0.824999988079071,
"avg_line_length": 159,
"blob_id": "adc3c172c6b384f2e51ed4da0b8498d849da2c4e",
"content_id": "1db98f579082ed52e44fd53a310ebec7e4662f13",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Markdown",
"length_bytes": 320,
"license_type": "no_license",
"max_line_length": 303,
"num_lines": 2,
"path": "/README.md",
"repo_name": "alireza-mht/MNIST-dataset",
"src_encoding": "UTF-8",
"text": "# MNIST-dataset\nImplement different classification methods on Mnist dataset. The mnist dataset contains different hand-written numbers. In this project, the main purpose is detecting the numbers. We create the model based on different classification algorithms like Adaboost, CNN, Decision tree, Random forest, and SVM.\n"
}
] | 4 |
szobin/maze_level3
|
https://github.com/szobin/maze_level3
|
7979f7aae58c0ec7ae10d9ae62a0e95d6f4ff965
|
1cdfb314eb886b085da887bb87d1a72e89e722ab
|
988df23d23eb75f034e6ebc8287b3d690c6a85d8
|
refs/heads/master
| 2023-06-02T20:59:26.503221 | 2021-06-22T03:49:28 | 2021-06-22T03:49:28 | 378,183,715 | 0 | 0 | null | null | null | null | null |
[
{
"alpha_fraction": 0.6000000238418579,
"alphanum_fraction": 0.6666666865348816,
"avg_line_length": 14,
"blob_id": "dec0691be6f1105906c5b1b1d55d9befe39c7edf",
"content_id": "8c3102c628caaf7534826dc832b3409ab039f60f",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 15,
"license_type": "no_license",
"max_line_length": 14,
"num_lines": 1,
"path": "/core/__init__.py",
"repo_name": "szobin/maze_level3",
"src_encoding": "UTF-8",
"text": "MAZE_LEVEL = 3\n"
}
] | 1 |
COLD-PLAY/ZilliqaGraphQL
|
https://github.com/COLD-PLAY/ZilliqaGraphQL
|
a8aaef1fa4934f1cbb63321bf9741bffd5573e9c
|
3970f43c3d9402129621546d75505704d2d2e785
|
0448c67d547b1c1b00f56a03708b25ae7b18aa79
|
refs/heads/master
| 2021-06-24T14:39:28.239235 | 2021-02-25T12:17:03 | 2021-02-25T12:17:03 | 211,491,548 | 0 | 0 | null | null | null | null | null |
[
{
"alpha_fraction": 0.5206677317619324,
"alphanum_fraction": 0.5786963701248169,
"avg_line_length": 28.9761905670166,
"blob_id": "d2433ccfa58247e1dca5f69cbf7052c0d7a75adb",
"content_id": "42d2578b2662c18ea9adce7f9c2a31d2d70c0d0e",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 1262,
"license_type": "no_license",
"max_line_length": 77,
"num_lines": 42,
"path": "/scripts/hasTx.py",
"repo_name": "COLD-PLAY/ZilliqaGraphQL",
"src_encoding": "UTF-8",
"text": "import requests, time, multiprocessing\n\ndata = {\n\t\"id\": \"1\",\n\t\"jsonrpc\": \"2.0\",\n\t\"method\": \"GetTxBlock\",\n\t\"params\": [\"\"]\n}\nr = []\ndef fun(s, e):\n\tfor i in range(s, e+1):\n\t\tprint(\"第%d个TxBlock %s\" % (i, time.strftime(\"%H:%M:%S\", time.localtime())))\n\t\tdata[\"params\"] = [str(i)]\n\t\tres = requests.post(\"https://api.zilliqa.com/\", json=data).json()[\"result\"]\n\t\tprint(res)\n\t\tif res[\"header\"][\"NumTxns\"]:\n\t\t\tprint(\"////////////////////%d: %d\" % (i, res[\"header\"][\"NumTxns\"]))\n\t\tprint(\"////////////////////%s\" % res[\"header\"][\"Rewards\"])\n\nif __name__ == \"__main__\":\n\t# pool = multiprocessing.Pool(10)\n\t# for i in range(10):\n\t# \tpool.apply_async(fun, (50000+1000*i+1, 50000+1000*(i+1)))\n\t# pool.close()\n\t# pool.join()\n\n\tfun(99, 60000)\n\n# match()-[r:receive]-() delete r;\n# match()-[r:send]-() delete r;\n# match()-[r:to]-() delete r;\n# match()-[r:traded]-() delete r;\n# match(n:Tx) detach delete n;\n# match(n:TxBlock{BlockNum:\"59903\"}) detach delete n;\n# match(n:TxBlock{BlockNum:\"59945\"}) detach delete n;\n# match(n:TxBlock{BlockNum:\"59946\"}) detach delete n;\ncurl -d '{\n \"id\": \"1\",\n \"jsonrpc\": \"2.0\",\n \"method\": \"GetBalance\",\n \"params\": [\"9a690adac3446b2bb6d9f0bf2ef91c1ecdd198f2\"]\n}' -H \"Content-Type: application/json\" -X POST \"https://api.zilliqa.com/\""
},
{
"alpha_fraction": 0.648306131362915,
"alphanum_fraction": 0.6963197588920593,
"avg_line_length": 36.65137481689453,
"blob_id": "d5d2efa9026daae0cac4d836a8fc8e13abe062cf",
"content_id": "429d40c32adca5ba4065cfc062ea312443a2522e",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Markdown",
"length_bytes": 5755,
"license_type": "no_license",
"max_line_length": 258,
"num_lines": 109,
"path": "/documents/document.md",
"repo_name": "COLD-PLAY/ZilliqaGraphQL",
"src_encoding": "UTF-8",
"text": "## 一、19-5-30<u>(6-27:下划线+粗 表示更新)</u><span style=\"color:red\">(7-15:红色加粗字体 表示更新)</span>\n### 初步下载到Neo4J中及节点 & 关系的定义和展示\n#### 1 利用[官方提供的JSON-RPC API](https://apidocs.zilliqa.com/#introduction)以及Python的[Requests](http://docs.python-requests.org)库和[py2neo]库(https://py2neo.org/v4/)对Zilliqa上的数据进行下载并将数据保存到[Neo4J](https://neo4j.com/)图形数据库中。在getdata.py文件中分别对Zilliqa链上的数据进行了初步的分类和下载保存。\n##### 1.1 分别有以下四种label:\n|TxBlock|Contents|DSBlock|Contents|\n|--|--|--|--|\n|\"BlockNum\"|块号|\"BlockNum\"|块号|\n|\"DSBlockNum\"|所属DS块号|\"Difficulty\"|难度|\n|\"MinerPubKey\"|矿工公钥|\"PoWWinners\"|PoW赢家|\n|\"NumTxns\"|交易数量|\"LeaderPubKey\"|Leader公钥|\n|\"PrevBlockHash\"|上一块的Hash|\"PrevHash\"|上一块的Hash|\n|\"Rewards\"|奖励|\"signature\"|签名|\n|\"Timestamp\"|时间戳|\"Timestamp\"|时间戳|\n\n|Tx|Contents|Account|Contents|\n|--|--|--|--|\n|\"ID\"|交易的Hash|\"address\"|账户地址|\n|\"amount\"|交易金额|<u>**\"balance\"**</u>|<u>**账户余额**</u>|\n|\"senderPubKey\"|发送者的公钥|<u>**\"existed\"**</u>|<u>**账户是否还存在**</u>|\n|\"signature\"|签名|||\n|\"toAddr\"|接收者的地址|||\n\n##### 1.2 另外,这四种label之间有多个关系,具体如下:\n|Relations|Between|Contents|\n|--|--|--|\n|from|Tx->Account|表示交易Tx来自Account|\n|send|Account->Tx|表示Account发出了交易Tx|\n|to|Tx->Account|表示交易Tx发送到Account|\n|receive|Account->Tx|表示Account收到了交易Tx|\n|mine|Account->TxBlock|表示Account挖到TxBlok|\n|mined|TxBlock->Account|表示TxBlok被Account挖到|\n|has|DSBlock->TxBlock&TxBlock->Tx|表示DSBlock(TxBlock)中含有TxBlock(Tx) <u>**新增顺序**</u>|\n|in|DSBlock->TxBlock&TxBlock->Tx|表示TxBlock(Tx)在DSBlock(TxBlock)中 <u>**新增顺序**</u>|\n|traded|Account->Account|表示两者之间有过交易 <u>**新增交易的Hash值**</u>|\n\n#### 2 下载进度 & 节点和关系的展示\n##### 2.1 下载进度,即已保存的节点数量\n|DSBlockNums|TxBlockNums|TxNums|Accounts|\n|--|--|--|--|\n|<u>**816**</u>|<u>**81525**</u>|<u>**62218**</u>|<u>**10708**</u>|\n|**<div style=\"color:red\">1710</div>**|**<div style=\"color:red\">170900</div>**|**<div style=\"color:red\">220424</div>**|**<div style=\"color:red\">34559</div>**|\n\n<b>count(has) = count(Tx) + count(TxBlock)</b>\n\n##### 2.2 节点内容展示\n<!-- \n\n\n -->\n  <u>**pass**</u>\n\n#### <u>3 API设计 & 实现</u>\n##### <u>3.1 实现的API</u>\n  <u>**详见[API Introduction](apis.md)**</u>\n```python\n1. getBalance(address) # 获取address 账户的余额\n2. getTxs(address1, address2, type, start, end) # 获取账户1、2发生过的交易\n3. getMiners(start, end) # 获取从start 到end 过程中的所有矿工\n4. getAccountTxs(address) # 获取address 账户发生过的所有交易\n5. getMinedBlock(address) # 获取address 账户挖过的所有块\n6. \n```\n\n##### <u>3.2 操作界面</u>\n  <u>**查询界面,可供选择**</u>\n\n  <u>**结果界面,返回json数据和简单的图形**</u>\n\n\n#### <u>4 Issues & Operations</u>\n##### <u>4.1 Issues</u>\n<u><b>\n1. 19/06/22,昨晚同步的时候出现了`GetTransaction` 出现错误的情况,跟代码无关,是api 的问题。\n\t解决:`GetTransaction` 错误则重新`GetTransaction`\n2. 19/06/22,Tx 已存在不该这么写,否则会在删除TxBlock 时失去TxBlock 与Tx 之间的关系。\n\t解决:Tx 已存在时同样需要添加TxBlock 与Tx 之间的关系\n3. 19/06/24,半天找不到`py2neo.data.Node` & `py2neo.data.Relationship`的`id`以及前者的label。\n\t解决:`GraphObject.identity, list(py2neo.data.Node.labels())`\n4. 19/06/25,半天找不到`py2neo.data.Relationship` 的头节点和尾节点以及关系名。\n\t解决:`py2neo.data.Relationship.start_node/end_node, list(py2noe.data.Relationship.types())`\n4. 19/06/25,Python-Flask向js传递json数据时js解析数据出现错误。\n\t解决:`js code: eval({{json_data|safe }})`,需要加上`safe `\n5. 19/06/26,`getdata.py`代码同步速度缓慢,一个交易`5s`,需进行优化。\n\t解决:待解决\n6. 19/06/27,`app.py`代码查询节点数目过多时速度缓慢,需进行优化。\n\t解决:待解决\n7. 19/07/25,`175334`块出现问题,似乎是撤回了一些个交易,无法查到其中的19个交易信息,导致getdata.py 跑不动了。\n\t解决:在getdata.py中对该块进行特殊处理即可\n8. \n</b></u>\n##### <u>4.2 py2neo 操作</u>\n<u><b>\n```python\n1. walk(GraphObject)\n2. dict(GraphObject)\n3. GraphObject.identity\n4. py2neo.data.Relationship.start_ndoe/end_node\n5. list(py2neo.data.Node.labels())\n6. list(py2neo.data.Relationship.types())\n7. \n```\n</b></u>\n\n<u><b>常用查询语句:\n```batch\nmatch (n:TxBlock{BlockNum:\"59903\"})-[has]->(p:Tx)return count(p)\nmatch ()-[p:has]->() return count(p)\n```\n</b></u>"
},
{
"alpha_fraction": 0.6415510177612305,
"alphanum_fraction": 0.6572428941726685,
"avg_line_length": 32.973567962646484,
"blob_id": "6486a745c48a8f1fb1a4c64ce1ad013811cb63ca",
"content_id": "cf8acb9c2efa776ed727eeebbdf2be407990bc4f",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 8037,
"license_type": "no_license",
"max_line_length": 122,
"num_lines": 227,
"path": "/app.py",
"repo_name": "COLD-PLAY/ZilliqaGraphQL",
"src_encoding": "UTF-8",
"text": "__author__ = 'ZhouLiao'\nfrom py2neo import Graph, Node, Relationship, NodeMatcher, RelationshipMatcher\nfrom flask import Flask, render_template, request, jsonify, Response\nfrom logging.config import dictConfig\nimport json, time\n# from flask_talisman import Talisman\nfrom flask_sslify import SSLify\n\nclass Core(object):\n\tdef __init__(self, uri, user, pwd):\n\t\tself.graph = Graph(uri, username=user, password=pwd)\n\t\tself.matcher_node = NodeMatcher(self.graph)\n\t\tself.matcher_relation = RelationshipMatcher(self.graph)\n\n\t# 将py2neo.data.Node 转换成echarts 可渲染的数据并返回\n\tdef toEchartsNode(self, node):\n\t\tlabel_category = {\n\t\t\t\"TxBlock\": [node[\"BlockNum\"], 0], \"Tx\": [node[\"ID\"], 3],\n\t\t\t\"Account\": [node[\"address\"], 2], \"DsBlock\": [node[\"BlockNum\"], 1]\n\t\t}\n\t\tdata = {\n\t\t\t\"name\": str(node.identity), \"label\": label_category[list(node.labels)[0]][0],\n\t\t\t\"category\": label_category[list(node.labels)[0]][1]\n\t\t}\n\t\tdata.update(dict(node))\n\t\treturn data\n\n\tdef toEchartsRelation(self, relation):\n\t\tdata = {\"id\": str(relation.identity),\n\t\t\t\t\"source\": str(relation.start_node.identity),\n\t\t\t\t\"target\": str(relation.end_node.identity),\n\t\t\t\t\"relationship\": list(relation.types())[0]}\n\t\tdata.update(dict(relation))\n\t\treturn data\n\n\tdef getNode(self, label, value):\n\t\tif label == \"TxBlock\" or label == \"DsBlock\":\n\t\t\tnodes = self.matcher_node.match(label, BlockNum=value)\n\t\telif label == \"Account\":\n\t\t\tnodes = self.matcher_node.match(label, address=value)\n\t\t\tif not nodes: nodes = self.matcher_node.match(label, address_new=value)\n\t\telif label == \"Tx\":\n\t\t\tnodes = self.matcher_node.match(label, ID=value)\n\t\tif not nodes: self.error(\"%s %s is not existed!\" % (label, value))\n\t\treturn nodes.first()\n\n\tdef getRelation(self, nodes, label):\n\t\trels = self.matcher_relation.match(nodes, label)\n\t\tlabel_attr = {\"TxBlock\": \"BlockNum\", \"DsBlock\": \"BlockNum\", \"Account\": \"address\", \"Tx\": \"TxHash\"}\n\t\tif not rels:\n\t\t\tif len(nodes) == 0:\n\t\t\t\tself.error(\"Relation %s is not existed!\" % (label))\n\t\t\telif len(nodes) == 1:\n\t\t\t\tself.error(\"Relation %s of <%s: %s> is not existed!\" % (\n\t\t\t\t\tlabel, list(nodes[0].labels)[0],\n\t\t\t\t\tnodes[0][label_attr[list(nodes[0].labels)[0]]]\n\t\t\t\t))\n\t\t\telif len(nodes) == 2:\n\t\t\t\tself.error(\"Relation %s between <%s: %s> & <%s: %s> is not existed!\" % (\n\t\t\t\t\tlabel, list(nodes[0].labels)[0], nodes[0][label_attr[list(nodes[0].labels)[0]]],\n\t\t\t\t\tlist(nodes[1].labels)[0], nodes[1][label_attr[list(nodes[1].labels)[0]]]\n\t\t\t\t))\n\t\t\telse:\n\t\t\t\tself.error(\"getRelation() Parameters Wrong!\")\n\n\t\treturn rels\n\n\t# 更新至上一次交易(或矿工)中出现该address 时的官方API 得到的balance\n\tdef getBalance(self, address):\n\t\taccount = self.getNode(\"Account\", address)\n\t\treturn [account]\n\n\t# 获取address1 和address2 的交易\n\t# type:\n\t# \t1. \"recent\": 返回最近的1个(如果不足则返回全部)交易\n\t# \t2. \"all\": 返回所有交易(排序后)\n\t# \t3. \"period\": 返回两个TxBlock 之间的交易(排序后)\n\tdef getTxs(self, address1, address2, type=\"all\", start=None, end=None):\n\t\tif start and end:\n\t\t\tstart, end = int(start), int(end)\n\t\taccount1 = self.getNode(\"Account\", address1)\n\t\taccount2 = self.getNode(\"Account\", address2)\n\t\ttxs_hash, txs = [tx[\"TxHash\"] for tx in self.getRelation([account1, account2], \"traded\")], []\n\t\tfor tx_hash in txs_hash:\n\t\t\ttxs.append(self.getTx(tx_hash))\n\t\t# 按所在的TxBlock 顺序排列\n\t\ttxs = self.sortTxs(txs)\n\n\t\tif type == \"all\":\n\t\t\treturn txs\n\t\telif type == \"recent\":\n\t\t\treturn txs[0]\n\t\telif type == \"period\" and start and end and start <= end:\n\t\t\tres = []\n\t\t\tfor tx in txs:\n\t\t\t\tif start <= int(tx[\"epoch_num\"]) <= end:\n\t\t\t\t\tres.append(tx)\n\t\t\treturn res\n\t\telse:\n\t\t\tself.error(\"getTxs() Parameters Wrong!\")\n\t\n\t# 对交易节点按所在的TxBlock 倒序排列\n\tdef sortTxs(self, txs):\n\t\treturn sorted(txs, key=lambda tx: int(tx[\"epoch_num\"]), reverse=True)\n\n\t# 对块节点按块号BlockNum 倒序排列\n\tdef sortBlocks(self, blocks):\n\t\treturn sorted(blocks, key=lambda block: int(block[\"BlockNum\"]), reverse=True)\n\n\t# 账户1、2是否发生过交易\n\tdef isTraded(self, address1, address2):\n\t\taccount1 = self.getNode(\"Account\", address1)\n\t\taccount2 = self.getNode(\"Account\", address2)\n\t\tif not account1 or not account2:\n\t\t\tself.error(\"Account Not Existed!\")\n\t\t\treturn False\n\t\tif len(self.getRelation([account1, account2], \"traded\")):\n\t\t\treturn True\n\t\treturn False\n\n\t# 获取BlockNum 从start到end过程中的矿工\n\tdef getMiners(self, start, end):\n\t\tif not start.isdigit() or not end.isdigit():\n\t\t\tself.error(\"getMiners() Parameters Wrong: Start & End Should Be Number!\")\n\t\tminers, start, end = [], int(start), int(end)\n\t\tfor i in range(start, end+1):\n\t\t\tblock = self.getNode(\"TxBlock\", str(i))\n\t\t\tmined = self.getRelation([block], \"mined\").first()\n\t\t\tminers.append(mined.end_node)\n\t\treturn miners\n\t\n\t# 返回address 账户的所有交易\n\tdef getAccountTxs(self, address):\n\t\ttxs = []\n\t\taccount = self.getNode(\"Account\", address)\n\t\tsends = self.matcher_relation.match([account], \"send\")\n\t\treces = self.matcher_relation.match([account], \"receive\")\n\t\tfor send in sends:\n\t\t\ttxs.append(send.end_node)\n\t\tfor rece in reces:\n\t\t\ttxs.append(rece.end_node)\n\t\ttxs = self.sortTxs(txs)\n\t\treturn txs\n\n\t# 返回address 账户挖的所有块\n\tdef getMinedBlocks(self, address):\n\t\taccount, blocks = self.getNode(\"Account\", address), []\n\t\tmines = self.getRelation([account], \"mine\")\n\t\tfor mine in mines:\n\t\t\tblocks.append(mine.end_node)\n\t\tblocks = self.sortBlocks(blocks)\n\t\treturn blocks\n\n\tdef error(self, message):\n\t\tself.printf(\"ERROR: %s %s\" % (message, time.strftime(\"%y-%m-%d %H:%M:%S\", time.localtime())))\n\t\traise Exception(\"ERROR: \" + message)\n\t\n\tdef printf(self, message):\n\t\tprint(message)\n\t\twith open(\"log/query.log\", \"a\") as file:\n\t\t\tfile.write(message + '\\r')\n\napp = Flask(__name__)\n# csp = {\n# \t'default-src': '\\'self\\'',\n# \t'script-src': '\\'self\\''\n# }\n# Talisman(\n# \tapp,\n# \tcontent_security_policy=csp,\n# \tcontent_security_policy_nonce_in=['script-src']\n# )\nSSLify(app)\nuri, user, pwd = \"http://localhost:7474\", \"neo4j\", \"liaozhou1998\"\ncore = Core(uri, user, pwd)\n\[email protected]('/')\ndef index():\n\treturn render_template(\"index.html\")\n\[email protected]('/docs/')\ndef docs():\n\treturn render_template(\"docs.html\")\n\[email protected]('/query/', methods=['POST', 'GET'])\ndef query():\n\tfor key, value in request.form.items():\n\t\tif value:\n\t\t\tcore.printf(\"key: %s value: %s %s\" % (key, value, time.strftime(\"%y-%m-%d %H:%M:%S\", time.localtime())))\n\n\ttry:\n\t\tif request.form[\"method\"] == \"api\":\n\t\t\tmethod = request.form[\"api_method\"]\n\t\t\tif method == \"getBalance\":\n\t\t\t\tres = core.getBalance(request.form[\"parameter1\"])\n\t\t\telif method == \"getTxs\":\n\t\t\t\taddress1, address2, type_ = request.form[\"parameter1\"], request.form[\"parameter2\"], request.form[\"parameter3\"]\n\t\t\t\tstart, end = request.form[\"parameter4\"], request.form[\"parameter5\"]\n\t\t\t\tres = core.getTxs(address1, address2, type_, start, end)\n\t\t\telif method == \"getMiners\":\n\t\t\t\tres = core.getMiners(request.form[\"parameter1\"], request.form[\"parameter2\"])\n\t\t\telif method == \"getAccountTxs\":\n\t\t\t\tres = core.getAccountTxs(request.form[\"parameter1\"])\n\t\t\telse:\n\t\t\t\tres = core.getMinedBlocks(request.form[\"parameter1\"])\n\t\t\tres_ = [core.toEchartsNode(_) for _ in res]\n\t\t\tres_ = {\"nodes\": res_, \"links\": []}\n\t\telse:\n\t\t\tres = core.graph.run(request.form[\"cql\"])\n\t\t\t# 表示是node\n\t\t\tif res.keys()[0] == 'n':\n\t\t\t\tres = [_.values()[0] for _ in res]\n\t\t\t\tres_ = [core.toEchartsNode(_) for _ in res]\n\t\t\t\tres_ = {\"nodes\": res_, \"links\": []}\n\t\t\t# 否则是relation\n\t\t\telse:\n\t\t\t\tres = [_.values()[0] for _ in res]\n\t\t\t\tres_ = [core.toEchartsRelation(_) for _ in res]\n\t\t\t\tres_ = {\"nodes\": [], \"links\": res_}\n\t\tres = [dict(_) for _ in res]\n\t\treturn render_template(\"result.html\", res=json.dumps(res), res_=json.dumps(res_))\n\texcept Exception as e:\n\t\treturn render_template(\"error.html\", message=str(e))\n\n# app.run(host=\"0.0.0.0\", port=1116, ssl_context=('cert/server.crt', 'cert/server.key'))\n# app.run(host=\"0.0.0.0\", port=1116)\napp.run(host=\"0.0.0.0\", port=1116, ssl_context=('cert/2968267_zilliqagraph.com.pem', 'cert/2968267_zilliqagraph.com.key'))"
},
{
"alpha_fraction": 0.4938061833381653,
"alphanum_fraction": 0.7094972133636475,
"avg_line_length": 31.682538986206055,
"blob_id": "00824e360b8ce5e8a8bf9e99a0eab3990fa294cd",
"content_id": "206c8bbbacb4eeebd779e9cd21e5d6ab8cedffac",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 4129,
"license_type": "no_license",
"max_line_length": 102,
"num_lines": 126,
"path": "/scripts/test.py",
"repo_name": "COLD-PLAY/ZilliqaGraphQL",
"src_encoding": "UTF-8",
"text": "__author__ = \"ZhouLiao\"\nfrom py2neo import Graph, Node, Relationship, NodeMatcher\nimport requests, execjs, time, json\n\ndata = {\n\t\"id\": \"1\",\n\t\"jsonrpc\": \"2.0\",\n\t\"method\": \"\",\n\t\"params\": [\"\"]\n}\n\n# curl -d '{\n# \"id\": \"1\",\n# \"jsonrpc\": \"2.0\",\n# \"method\": \"GetBalance\",\n# \"params\": [\"21f50477b4ebecf86a737e22cd0f611b7184665b\"]\n# }' -H \"Content-Type: application/json\" -X POST \"https://api.zilliqa.com/\"\n\njsCode = '''\n\tvar hashjs = require('hash.js');\n\tfunction getAddress(pubKey) {\n\t\treturn hashjs.sha256().update(pubKey, 'hex').digest('hex').slice(24);\n\t}\n'''\neval = execjs.compile(jsCode)\n\ndef getAddress(pubKey):\n\treturn eval.call(\"getAddress\", pubKey[2:])\n\ndef getResult(method, params):\n\tdata[\"method\"] = method\n\tdata[\"params\"] = [params]\n\ttry:\n\t\tres = requests.post(\"https://api.zilliqa.com/\", json=data, timeout=10).json()\n\t\treturn res[\"result\"] if \"result\" in res else res\n\texcept requests.exceptions.RequestException as e:\n\t\tprint(\"///////////////////////////连接超时: 重试!\")\n\t\tprint(e)\n\t\treturn getResult(method, params)\ndef test():\n\tres = getResult(\"GetBalance\", \"1931e8bb19518a049a2824333baee89a433559c6\")\n\tres = getResult(\"GetRecentTransactions\", \"\")\n\tres = getResult(\"GetTransaction\", \"7581733a5a7b6edf1778446f8c01565637025cb43fa6240a01ddcaeb6ff80f4d\")\n\tres = getResult(\"GetTxBlock\", \"60674\")\n\tres = getResult(\"GetDsBlock\", \"1430\")\n\t# 7581733a5a7b6edf1778446f8c01565637025cb43fa6240a01ddcaeb6ff80f4d\n\tres = getResult(\"GetTransactionsForTxBlock\", \"60673\")\n\tprint(res)\n\ttxs = 0\n\tfor i in range(59903, 60007):\n\t\tprint(i)\n\t\tres = getResult(\"GetTxBlock\", str(i))\n\t\tprint(res[\"header\"][\"NumTxns\"])\n\t\ttxs += int(res[\"header\"][\"NumTxns\"])\n\tprint(txs)\n\taddress = getAddress(\"0x02545402B1D5BCADDAEF278FAEF5FD926C6917A5CB3E17FC838F863ED041B88F5A\")\n\tprint(address)\n\t# 1931e8bb19518a049a2824333baee89a433559c6\n\t# 26AFb91DC1B2083E64fe58b3CCBe943D945F612E\n\n\tfile = open(\"out.log\", \"a\")\n\tfor i in range(4):\n\t\tfile.write(time.strftime(\"%y-%m-%d %H:%M:%S\", time.localtime())+'\\r')\n\tprint(\"%s\" % time.strftime(\"%y-%m-%d %H:%M:%S\", time.localtime()))\n\ta = []\n\n# res = getResult(\"GetTransactionsForTxBlock\", \"175334\")\n# print(res)\n# err, nrr = 0, 0\n# for txs in res:\n# \tif txs:\n# \t\tfor tx in txs:\n# \t\t\tprint(tx)\n# \t\t\tres_ = getResult(\"GetTransaction\", \"tx\")\n# \t\t\tprint(res_)\n# \t\t\tif \"error\" in res_:\n# \t\t\t\terr += 1\n# \t\t\telse: nrr += 1\n# print(err, nrr)\n\ntxs = [\n\t# \"93cd69e14e5f624f14a09254c47a729fd2295d69bedfaac03056513b3ff6af29\",\n\t# \"8d702cb302fcb25fe0b33b07bbd11c816815dda5747af24326aeac20ecfd6c73\",\n\t# \"05bab941d5eeac7ce78b8bc8d890f42675b07975c678ae5184a50df67913ae26\",\n\t# \"078efcf10473251f4a4ba8c4ce18810f06d3cafcf2eb8122a5f4293e9d2bdea3\",\n\t# \"097a0ef43987adcf511c0f140ac316b3f7ac8b1819fdd6a1c44da82fbad541ae\",\n\t# \"0aa3c892a183457dd4b0252e960466635c2508f6a1b6f528e84fbbc7682c6710\",\n\t# \"0b6beb2cea06737a531f02493f5aa788afbde15fd3e87006b65800d463909ee8\",\n\t# \"1355121906ac35972e6b65026b07b427b5c4ae148c225a9a53b026e5749d4705\",\n\t# \"255f44c45f43197cbc53f2b33795093186d4cf12dead77b2e5166a88002f294a\",\n\t# \"2909e3341a97d57ffaccd514cb2e9d6ea402b1be927125774ce9501fb813b25b\",\n\t# \"297a221754dd0b34b43adeadb762d95ee6bdf0a96fbbc42e06f6ab5a47f559e6\",\n\t# \"303908b5ccba037748bb93946092470247192e25c7c0c5f17d55dca19122a9c9\",\n\t# \"35ed1d1e0611b805a5de8b5ffa162192c6a7bfb5b592916cdda8fa856bd86a5b\",\n\t# \"4fdcb5cee42ca7c0f639101c08658019ed55745094d1d0f6abcca2981ee3adc9\",\n\t# \"5161e9eda90904f601d16702586d35bb82db702e88f24b7a36c1691dde6e7f4e\",\n\t# \"61be4d61ec71740f835b2910c182fe35d161cfc1f3b2c4b9debef3b229b90ef3\",\n\t# \"ada4f7b094d718bedffdc4827ab1e0ab2bd14d5fb2840505f81a4ddcf8c1bff3\",\n\t# \"d1b8d0c891e88ea12bea3fb9c58b2fc69a5cdffe46024f6a17f51c4e2cc807b7\",\n\t# \"e0b14cba55e1e1e17e5be24a6b77ab3109ed788360aeb8652f74c817b3e4d365\",\n\t\"afffe57d469e428c176b4e74ba81fad59525ca34f5380d12ec3fb3985e6db5d6\",\n\t\"6eaa46d2e4b6940a0435d90e6bb836f64a3008925d3cb0ae0e603584e6db2dbd\"\n]\n\n# for tx in txs:\n# \tprint(tx)\n# \tres__ = getResult(\"GetTransaction\", tx)\n# \t# while \"error\" in res__:\n# \t# \tres__ = getResult(\"GetTransaction\", tx)\n# \t# \tprint(tx, \"error\")\n# \tif \"error\" in res__:\n# \t\tprint(\"error\")\n\n# account = Account(address=\"a11de7664f55f5bdf8544a9ac711691d01378b4c\")\n# print(account.bech32_address)\nprint(233)\n# res = getResult(\"GetTxBlock\", \"202775\")\n# import requests, json\n# res = requests.post(\"https://api.zilliqa.com/\", json={\n# \t\"id\": \"1\",\n# \t\"jsonrpc\": \"2.0\",\n# \t\"method\": \"GetTxBlock\",\n# \t\"params\": [\"202775\"]\n# }, timeout=10).json()\n# print(res)\nprint(\"../log/%s.log\" % time.strftime('%Y-%m-%d',time.localtime(time.time())))"
},
{
"alpha_fraction": 0.6603680849075317,
"alphanum_fraction": 0.6728834509849548,
"avg_line_length": 32.685951232910156,
"blob_id": "6cfd9f5747c78a2563b55bfdd1c9d841c97c1d1f",
"content_id": "18def633d9555faf75cfbc9c73684a7b28ee8c84",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 4289,
"license_type": "no_license",
"max_line_length": 103,
"num_lines": 121,
"path": "/scripts/getdata_debug.py",
"repo_name": "COLD-PLAY/ZilliqaGraphQL",
"src_encoding": "UTF-8",
"text": "__author__ = \"ZhouLiao\"\nimport requests, execjs, time\n# from py2neo import Graph, Node, Relationship, NodeMatcher, RelationshipMatcher\nfrom func_timeout import *\nfrom pyzil.account import Account\nfrom pyzil.crypto.zilkey import is_bech32_address\n\ndata = {\n\t\"id\": \"1\",\n\t\"jsonrpc\": \"2.0\",\n\t\"method\": \"\",\n\t\"params\": [\"\"]\n}\n\nclass GetData(object):\n\tdef __init__(self, uri, user, pwd, curTxBlockNum):\n\t\tself.curTxBlockNum = curTxBlockNum\n\t\tself.jsCode = '''\n\t\t\tvar hashjs = require('hash.js');\n\t\t\tfunction getAddress(pubKey) {\n\t\t\t\treturn hashjs.sha256().update(pubKey, 'hex').digest('hex').slice(24);\n\t\t\t}\n\t\t'''\n\t\tself.eval = execjs.compile(self.jsCode)\n\t\n\t# 输出至控制台以及log 文件中\n\tdef printf(self, message):\n\t\tprint(message)\n\n\tdef getAddress(self, address, type_):\n\t\tif type_ == \"pubKey\":\n\t\t\told_address = self.eval.call(\"getAddress\", address[2:])\n\t\t\taccount = Account(address=old_address)\n\t\t\treturn (old_address, account.bech32_address)\n\t\telif type_ == \"toBech32\":\n\t\t\tif is_bech32_address(address): # no need to transfer to bech32 format\n\t\t\t\treturn (address, address)\n\t\t\taccount = Account(address=address)\n\t\t\treturn (address, account.bech32_address)\n\n\tdef getResult(self, method, params):\n\t\tdata[\"method\"] = method\n\t\tdata[\"params\"] = [params]\n\t\ttry:\n\t\t\tres = requests.post(\"https://api.zilliqa.com/\", json=data, timeout=10).json()\n\t\t\treturn res[\"result\"] if \"result\" in res else res\n\t\texcept requests.exceptions.RequestException as e:\n\t\t\tself.printf(\"ERROR: Api Connect Timeout: Recall Api!\")\n\t\t\treturn self.getResult(method, params)\n\n\tdef getTxBlcokData(self, startBlock, endBlock):\n\t\tfor i in range(startBlock, endBlock+1):\n\t\t\tself.getOneTxBlcokData(str(i))\n\n\tdef getOneTxBlcokData(self, blockNum):\n\t\tself.printf(\"the %s TxBlock %s\" % (blockNum, time.strftime(\"%y-%m-%d %H:%M:%S\", time.localtime())))\n\n\t\tres = self.getResult(\"GetTxBlock\", blockNum)\n\t\twhile \"header\" not in res or res[\"header\"][\"Timestamp\"] == \"0\":\n\t\t\t# 表示到达最新的一个块,等一分钟再继续更新\n\t\t\tself.printf(\"Waiting for Next Block %s\" % time.strftime(\"%y-%m-%d %H:%M:%S\", time.localtime()))\n\t\t\ttime.sleep(60)\n\t\t\tres = self.getResult(\"GetTxBlock\", blockNum)\n\n\t\t# 当TxBlock中有交易时,获取交易信息以及交易双方的账户信息并存入Neo4j中\n\t\tif res[\"header\"][\"NumTxns\"]:\n\t\t\tself.printf(\"%d Txs in all\" % res[\"header\"][\"NumTxns\"])\n\t\t\tself.getTxData(res[\"header\"][\"BlockNum\"])\n\n\tdef getDsBlockData(self, startBlock, endBlock):\n\t\tfor i in range(startBlock, endBlock+1):\n\t\t\tself.getOneDsBlcokData(str(i))\n\n\t# 返回DsBlock 节点\n\tdef getOneDsBlcokData(self, blockNum):\n\t\tself.printf(\"the %s DsBlock %s\" % (blockNum, time.strftime(\"%y-%m-%d %H:%M:%S\", time.localtime())))\n\t\tres = self.getResult(\"GetDsBlock\", blockNum)\n\t\n\tdef getTxData(self, txBlockNum):\n\t\tres, order = self.getResult(\"GetTransactionsForTxBlock\", txBlockNum), 1\n\t\twhile \"error\" in res:\n\t\t\tself.printf(\"ERROR: get microblock failed! recatch txs!\" + res[\"error\"][\"message\"])\n\t\t\tres = self.getResult(\"GetTransactionsForTxBlock\", txBlockNum)\n\t\tfor MicroBlock in res:\n\t\t\tif not MicroBlock:\n\t\t\t\tcontinue\n\t\t\tfor txHash in MicroBlock:\n\t\t\t\tself.getOneTxData(txBlockNum, txHash, order)\n\t\t\t\torder += 1\n\t\n\t# 获取一条交易的信息\n\t@func_set_timeout(60)\n\tdef getOneTxData(self, txBlockNum, txHash, order):\n\t\tself.printf(\"The %s transaction %s\" % (txHash, time.strftime(\"%y-%m-%d %H:%M:%S\", time.localtime())))\n\n\t\tres_ = self.getResult(\"GetTransaction\", txHash)\n\t\t# 获取交易信息时出错\n\t\t############################################# 待修改\n\t\tif \"error\" in res_:\n\t\t\tself.printf(\"ERROR: get txn info failed!\" + res_[\"error\"][\"message\"])\n\t\t\treturn -1\n\t\t\t\n\t\t# 解决调用api 时可能产生的问题\n\t\twhile \"receipt\" not in res_:\n\t\t\tres_ = self.getResult(\"GetTransaction\", txHash)\n\n\t\t# receipt不符合格式 重新规整一下\n\t\tres_[\"cumulative_gas\"] = res_[\"receipt\"][\"cumulative_gas\"]\n\t\tres_[\"epoch_num\"] = res_[\"receipt\"][\"epoch_num\"]\n\t\tres_[\"success\"] = str(res_[\"receipt\"][\"success\"])\n\t\tres_.pop(\"receipt\")\n\n\tdef run(self):\n\t\twhile True:\n\t\t\tself.getOneTxBlcokData(str(self.curTxBlockNum))\n\t\t\tself.curTxBlockNum += 1\n\nif __name__ == \"__main__\":\n\turi, user, pwd, curTxBlockNum = \"http://localhost:7474\", \"neo4j\", \"liaozhou1998\", 185174\n\tGeter = GetData(uri, user, pwd, curTxBlockNum)\n\tGeter.run()"
},
{
"alpha_fraction": 0.669579029083252,
"alphanum_fraction": 0.6894360780715942,
"avg_line_length": 34,
"blob_id": "d517903902a31489219566d9b7bde723be97e70f",
"content_id": "669b487946da4860fb261f38a30f8a1d91fa90ef",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 1299,
"license_type": "no_license",
"max_line_length": 78,
"num_lines": 36,
"path": "/scripts/update.py",
"repo_name": "COLD-PLAY/ZilliqaGraphQL",
"src_encoding": "UTF-8",
"text": "__author__ = \"ZhouLiao\"\nfrom pyzil.account import Account\nfrom pyzil.crypto.zilkey import is_bech32_address\nfrom py2neo import Graph, Node, Relationship, NodeMatcher, RelationshipMatcher\n\n# 将所有的交易中的fromAddr 和toAddr 新增其对应的bech32 格式的地址\nuri, user, pwd = \"http://localhost:7474\", \"neo4j\", \"liaozhou1998\"\ngraph = Graph(uri, username=user, password=pwd)\nmatcher_node = NodeMatcher(graph)\n\ndef getBech32(address):\n if is_bech32_address(address):\n return address\n account = Account(address=address)\n return account.bech32_address\n\ndef printf(message):\n print(message)\n with open(\"../log/update.log\", \"a\") as file:\n file.write(message + '\\r')\n\n# txs = matcher_node.match(\"Tx\")\naccounts = matcher_node.match(\"Account\")\n# for tx in txs:\n# fromAddr, toAddr = tx[\"fromAddr\"], tx[\"toAddr\"]\n# fromAddrNew, toAddrNew = getBech32(fromAddr), getBech32(toAddr)\n# tx.update({\"fromAddrNew\": fromAddrNew, \"toAddrNew\": toAddrNew})\n# printf(fromAddr + \",\" + fromAddrNew + \",\" + toAddr + \",\" + toAddrNew)\n# graph.push(tx)\n\nfor account in accounts:\n address = account[\"address\"]\n address_new = getBech32(address)\n account.update({\"address_new\": address_new})\n printf(address + \",\" + address_new)\n graph.push(account)"
},
{
"alpha_fraction": 0.554679811000824,
"alphanum_fraction": 0.5665024518966675,
"avg_line_length": 26.45945930480957,
"blob_id": "e50f24cdba1d4dcb79cd03d0c3c107411abb9f64",
"content_id": "c0c649094ec48dd5189c54169ee3716d2e2f8874",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Markdown",
"length_bytes": 1093,
"license_type": "no_license",
"max_line_length": 213,
"num_lines": 37,
"path": "/README.md",
"repo_name": "COLD-PLAY/ZilliqaGraphQL",
"src_encoding": "UTF-8",
"text": "### 1. Demo's File Structure\n```\n─Zilliqa\n ├─documets\n | ├─document.md\n | ...\n ├─log\n | ├─out.log\n | ...\n ├─pictures\n ├─scripts\n | ├─getdata.py\n | ...\n ├─static\n │ ├─js\n | | ├─jquery-3.4.1.min.js\n | | ├─echarts-all.js \n | | ...\n │ ├─css\n | | └─style.css\n ├─templates\n │ ├─index.html\n │ ├─result.html\n | ...\n └─app.py\n```\n### 2. How to Use\n#### 2.1 Query Interface\n  You can choose **```'Method': API or CQL```**, and after you choose **```API```**, you'll continue to choose **```'api_method': contains getBalance, getTxs, getMiners, getAccountTxs, getMinedBlock```**\n\n#### 2.2 Result Interface\n  You'll get the json_data of your query on the page's left and **one echarts graph** on the right.\n\n\n### 3. Docs\n  the apis' doc is [API-Docs](documents/apis.md).\n  the demo's doc is [Demo-Docs](documents/document.md)."
},
{
"alpha_fraction": 0.718426525592804,
"alphanum_fraction": 0.7453415989875793,
"avg_line_length": 68.14286041259766,
"blob_id": "a4efa0e332ad808f50011e2fb099f0cc264c1abf",
"content_id": "d03004568144c3a01523dd9b863b37600e45888f",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Markdown",
"length_bytes": 509,
"license_type": "no_license",
"max_line_length": 240,
"num_lines": 7,
"path": "/domain.md",
"repo_name": "COLD-PLAY/ZilliqaGraphQL",
"src_encoding": "UTF-8",
"text": "## Domain Name Registration & Certificate Application\n### 1. Domain Name Registration\n\nWe register the top domain name `zilliqagraph.com` on [阿里云](https://wanwang.aliyun.com) in China. As you can see in the upper picture, the price for the first year is `¥55(about $7.78`), and then renewal fee of 1 year is `¥69(about $9.76)`.\n\n### 2. Certificate Application\nWe can get the free certificate from some certificate issuing websites like \"aliyun.com, bt.com\"."
},
{
"alpha_fraction": 0.608430027961731,
"alphanum_fraction": 0.6747098565101624,
"avg_line_length": 26.75423812866211,
"blob_id": "e682327249085346a3b48b499e66142c4bf7bbeb",
"content_id": "830a41a3db2b284cae92fa2f63e9649072ee9d7c",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 3346,
"license_type": "no_license",
"max_line_length": 100,
"num_lines": 118,
"path": "/scripts/neo4j.py",
"repo_name": "COLD-PLAY/ZilliqaGraphQL",
"src_encoding": "UTF-8",
"text": "from py2neo import Graph, Node, Relationship, NodeMatcher, RelationshipMatcher, walk\nimport time, json\n\n##连接neo4j数据库,输入地址、用户名、密码\ngraph = Graph('http://localhost:7474', username='neo4j', password='liaozhou1998')\nmatcher = NodeMatcher(graph)\nmatcher_r = RelationshipMatcher(graph)\n\ndef test():\n\tblock = matcher.match(\"TxBlock\", BlockNum=str(122)).first()\n\tmined = matcher_r.match([block], \"mined\").first()\n\tfor _ in walk(mined):\n\t\tprint(type(_), _)\n\ttxss = matcher.match(\"Tx\", epoch_num=\"59903\")\n\ttxs = []\n\tfor tx in txss:\n\t\ttxs.append(tx)\n\ttxs = sorted(txs, key=lambda tx: int(tx[\"amount\"]))\n\tfor tx in txs:\n\t\tprint(tx)\n\taddress = \"46c39a963a1f26e08de63d0a27c88e01c6d8a6f5\"\n\taccount = matcher.match(\"Account\", address=address).first()\n\tprint(account)\n\n\ttxs = matcher.match(\"Tx\")\n\ti = 2\n\tfor tx in txs:\n\t\tprint(tx)\n\t\tif not i:\n\t\t\tbreak\n\t\ti -= 1\n\ndef deleteTxBlock(start, end):\n\tfor i in range(start, end):\n\t\tprint(i)\n\t\ttxblock = matcher.match(\"TxBlock\").where(BlockNum=str(i)).first()\n\t\tif txblock:\n\t\t\tprint(\"delete\")\n\t\t\tgraph.delete(txblock)\n\n# 获取BlockNum 从start到end过程中的矿工\ndef getMiners(start, end):\n\tminers = []\n\tfor i in range(start, end+1):\n\t\tblock = matcher.match(\"TxBlock\", BlockNum=str(i)).first()\n\t\tmined = matcher_r.match([block], \"mined\").first()\n\t\tminers.append(mined.end_node)\n\treturn miners\n\ndef getAccountTxs(address):\n\ttxs = []\n\taccount = matcher.match(\"Account\", address=address).first()\n\tsends = matcher_r.match([account], \"send\")\n\treces = matcher_r.match([account], \"receive\")\n\tfor send in sends:\n\t\tj = 0\n\t\tfor _ in walk(send):\n\t\t\tj += 1\n\t\t\tif j == 3:\n\t\t\t\ttxs.append(_)\n\tfor rece in reces:\n\t\tj = 0\n\t\tfor _ in walk(rece):\n\t\t\tj += 1\n\t\t\tif j == 3:\n\t\t\t\ttxs.append(_)\n\treturn txs\n\n\t# txs = getAccountTxs(\"A1E7973854dE977A86e307F127Ad4B00312ae03F\")\n\t# for tx in txs:\n\t# \tprint(tx)\n\n# 返回address 账户挖的所有块\ndef getMinedBlocks(address):\n\taccount, blocks = matcher.match(\"Account\", address=address).first(), []\n\tmines = matcher_r.match([account], \"mine\")\n\tfor mine in mines:\n\t\t# print(list(mine.types()[0]))\n\t\tblocks.append(mine.end_node)\n\t\tbreak\n\treturn blocks\n\n\t# address = \"9b9527237c8b64daa54229a2e290ae6ab563a380\"\n\t# blocks = getMinedBlocks(address)\n\t# for block in blocks:\n\t# \tnum = block[\"NumTxns\"]\n\t# blocks = [dict(block) for block in blocks]\n\t# print(json.dumps(blocks))\n\ndef getBalance(address):\n\treturn matcher.match(\"Account\", address=address).first()\n\n\t# address = \"9b9527237c8b64daa54229a2e290ae6ab563a380\"\n\t# # account = graph.run(\"match(n:Account{address:'9b9527237c8b64daa54229a2e290ae6ab563a380'}) return n\")\n\t# accounts = graph.run(\"match (a:TxBlock)-[r:has]->(b:Tx) return r limit 10\")\n\t# print(accounts.keys())\n\n\t# res = getBalance(\"2333\")\n\t# if not res[0]:\n\t# \tprint(\"error\")\n\t# address = \"9b9527237c8b64daa54229a2e290ae6ab563a380\"\n\t# account = getBalance(address)\n\t# print(account)\n\t# label_attr = {\"TxBlock\": \"BlockNum\", \"DsBlock\": \"BlockNum\", \"Account\": \"address\", \"Tx\": \"TxHash\"}\n\t# nodes = [getBalance(\"9b9527237c8b64daa54229a2e290ae6ab563a380\")]\n\t# label = \"has\"\n\t# print(\"Relation %s of <%s: %s> is not existed!\" % (\n\t# \tlabel, list(nodes[0].labels)[0], nodes[0][label_attr[list(nodes[0].labels)[0]]]\n\t# ))\n\n\t# rels = matcher_r.match(r_type=\"233\")\n\t# for _ in rels:\n\t# \tprint(_)\n\n# miners = getMiners(1, 100)\n# for miner in miners:\n# \tprint(miner)\ndeleteTxBlock(175334, 183160)"
},
{
"alpha_fraction": 0.4815923869609833,
"alphanum_fraction": 0.7805178761482239,
"avg_line_length": 33.4121208190918,
"blob_id": "5986bcd4c7358e2c305c2e0c2d342e5b8f2ba2a2",
"content_id": "26dafc3e7356c4bfdaf74d328a201f0db0ac0cb1",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Markdown",
"length_bytes": 5677,
"license_type": "no_license",
"max_line_length": 205,
"num_lines": 165,
"path": "/documents/apis.md",
"repo_name": "COLD-PLAY/ZilliqaGraphQL",
"src_encoding": "UTF-8",
"text": "## APIs INTRODUCTION\n---\n### getBalance\nReturns the <Account [py2neo.data.Node](https://py2neo.org/v4/data.html#py2neo.data.Node) Type> with its balance. You can use dict(result) function to get the result of <Python Dict Type>.\n\n#### ARGUMENTS\n|Parameter|Type|Required|Description|\n|--|--|--|--|\n|**address**|string|Required|\"The account's address\"|\n\n#### Example response:\n```json\n{\n\taddress: '46c39a963a1f26e08de63d0a27c88e01c6d8a6f5',\n\tbalance: '97000000000',\n\texisted: 'true'\n}\n```\n---\n### getTxs\nReturns the sorted array of <Tx [py2neo.data.Node](https://py2neo.org/v4/data.html#py2neo.data.Node) Type> between 2 account.\n\n#### ARGUMENTS\n|Parameter|Type|Required|Description|\n|--|--|--|--|\n|**address1**|string|Required|\"The account1's address\"|\n|**address2**|string|Required|\"The account2's address\"|\n|**type**|string|Required|\"The query type('all'[default], 'recent', 'period')\"|\n|**start**|int|Optional **Required if type='period'**|\"Start blocknum\"|\n|**end**|int|Optional **Required if type='period'**|\"End blocknum\"|\n\n#### Example response:\n```json\n[{\n\tID: '1f6c2d39cdf657e2253bb9fcf414b5988ad007818b4f2b19a1beff7e136318a2',\n\tamount: '674962570466901',\n\tcumulative_gas: '1',\n\tepoch_num: '59903',\n\tfromAddr: '7ccce6d75c35e1866b87ada3c0f965aae725d49a',\n\tgasLimit: '1',\n\tgasPrice: '1000000000',\n\tnonce: '1',\n\tsenderPubKey: '0x024EAF6FCD4223E66A851856AD3B5FE5E96E835F9B4EA7C4385ED3A4ED8744227B',\n\tsignature: '0x3FA9F64FA030774A331C3E743375DFC12924CCA770705E82A44FC018E208E5614DA32AD745BC8F1D72F01335CBFF212E078964F93E0F792E1F2D6C54B0C14C24',\n\tsuccess: 'True',\n\ttoAddr: 'd942c5606f3fb2e34f1c0933c9406f0453be7f9a',\n\tversion: '65537'\n},\n{\n\tID: '211283d4f97f6e45cc051cacbd8dc7ea854f8bce41ec818b464fd0531cafedbd',\n\tamount: '676921020823241',\n\tcumulative_gas: '1',\n\tepoch_num: '59903',\n\tfromAddr: 'c0985f691456b5e6b386840b97d8cf5325bbe076',\n\tgasLimit: '1',\n\tgasPrice: '1000000000',\n\tnonce: '1',\n\tsenderPubKey: '0x0248670D4586B012452CCE3C379CFABB7A8C51E3FEECF3DE69D4083BA2BDDBDAF3', signature: '0x1AFC8E44530DA1165D9FB8A13936990C7F1157A230DD8ADDA7DBCB6A78B8CC0B9948A13B2C3A362C5982E2622AEF1AB199664F06B28F6D0440526207AFC4EFA3',\n\tsuccess: 'True',\n\ttoAddr: 'd942c5606f3fb2e34f1c0933c9406f0453be7f9a',\n\tversion: '65537'\n}]\n```\n---\n### getMiners\nReturns the array of <Account [py2neo.data.Node](https://py2neo.org/v4/data.html#py2neo.data.Node) Type> between 2 blocknum.\n\n#### ARGUMENTS\n|Parameter|Type|Required|Description|\n|--|--|--|--|\n|**start**|int|Required|\"The start blocknum\"|\n|**end**|int|Required|\"The end blocknum\"|\n\n#### Example response:\n```json\n[{\n\taddress: '0a8323ac339f42fbc2670f9de68390fa43c77c2e',\n\tbalance: '0',\n\texisted: 'false'\n},\n{\n\taddress: 'd8b4397a0303c12830a95abc225585af71f41d89',\n\tbalance: '0',\n\texisted: 'false'\n}]\n```\n---\n### getAccountTxs\nReturns the array of <Tx [py2neo.data.Node](https://py2neo.org/v4/data.html#py2neo.data.Node) Type> of an account.\n\n#### ARGUMENTS\n|Parameter|Type|Required|Description|\n|--|--|--|--|\n|**address**|string|Required|\"The account's address\"|\n\n#### Example response:\n```json\n[{\n\tID: '825878decc4fe3f202815817c27c558dd0f5890514821dfc8aeeea03753e5bd6',\n\tamount: '669700111576395',\n\tcumulative_gas: '1',\n\tepoch_num: '59903',\n\tfromAddr: '93d2ea3c47b084bd358629d3ac741e080ac72ac7',\n\tgasLimit: '1',\n\tgasPrice: '1000000000',\n\tnonce: '1',\n\tsenderPubKey: '0x025B0D54DFCABB3FC095A2577F60D6A4909FEDB3FCC0DA5920B2AFF77272E25B40',\n\tsignature: '0x0D1A1F37EAA28529380CF4FA6FA878902BE9E995636F0D74387952B70DEACFA9854F69D28510A7273F9B45BEE8A272C803C9A9901A78200325172C3E238058AD',\n\tsuccess: 'True',\n\ttoAddr: 'd942c5606f3fb2e34f1c0933c9406f0453be7f9a',\n\tversion: '65537'\n},\n{\n\tID: '8afe62bff3532c8d582437cf9f1fa9fa8beb22a74898e773036e521156ee11cb',\n\tamount: '242137739980056',\n\tcumulative_gas: '1',\n\tepoch_num: '59903',\n\tfromAddr: '272115809054dd02ec5488601a8d5c883bedac83',\n\tgasLimit: '1',\n\tgasPrice: '1000000000',\n\tnonce: '1',\n\tsenderPubKey: '0x027DEEF14A42798159216692B0F7683A069D3D5A9C16866204D95F06D11789A457',\n\tsignature: '0xD78EAC5804AF62E9FFB9138FFF4446BFA55BC5A8B5FEA78ED161A05D620775D0302BFB54DFCC62A4F1294E442FF6D8F793F56B89635B3825F6DE38BF2D6DE4A4',\n\tsuccess: 'True',\n\ttoAddr: '422c85ab78f955776898c646f4a81a2d4c0b0f4d',\n\tversion: '65537'\n}]\n```\n---\n### getMinedBlock\nReturns the array of <Tx/DsBlock [py2neo.data.Node](https://py2neo.org/v4/data.html#py2neo.data.Node) Type> mined by an account.\n\n#### ARGUMENTS\n|Parameter|Type|Required|Description|\n|--|--|--|--|\n|**address**|string|Required|\"The account's address\"|\n\n#### Example response:\n```json\n{\n\tBlockNum: '61261',\n\tDSBlockNum: '613',\n\tGasLimit: '2000000',\n\tGasUsed: '0',\n\tHeaderSign: '13BA2E480D49BB6060B5E2D4FB6B25F9E43C86DCD1160A20F2FB44BFB487A0C6CA1B9F7CB297A94434D02A8E791236D60968AF6EA2343298B45AE6BD27BBC24F',\n\tMbInfoHash: 'e77327418034db80637759eec72181a9cb6b29b448763aad7cc883215865d87c',\n\tMicroBlock0Hash: '2a7ce94a4a26dfe2e1d10cbe85cab05162c450027b1ae0f81410fd9b71bf24e6',\n\tMicroBlock0TxnRootHash: '0000000000000000000000000000000000000000000000000000000000000000',\n\tMicroBlock1Hash: 'f2c1135635a4160cbc324aa93a3897859958ea6672abe7632aa775adcd315029',\n\tMicroBlock1TxnRootHash: '0000000000000000000000000000000000000000000000000000000000000000',\n\tMicroBlock2Hash: 'e589c735198da5436aaa0dbaf8c0f84fa0191580f02d21b43c7971370067f146',\n\tMicroBlock2TxnRootHash: '0000000000000000000000000000000000000000000000000000000000000000',\n\tMicroBlock3Hash: 'd1bea3286727b46d0029e8b59f7be34d67f6d7fc3cdb1ad435e754a1aa8f1ec5',\n\tMicroBlock3TxnRootHash: '0000000000000000000000000000000000000000000000000000000000000000',\n\tMinerPubKey: '0x0204D5A9E32D12412DCAC740C11CBFE85555C3A22591BF733AF6E65C2A3E69F06D',\n\tNumMicroBlocks: 4,\n\tNumTxns: 0,\n\tPrevBlockHash: '52fa49fdca8ca999644ca140c4d595839d450b2c207574ca8938cbef3a12101d',\n\tRewards: '0',\n\tStateDeltaHash: '0000000000000000000000000000000000000000000000000000000000000000',\n\tStateRootHash: '442b66ad1758906e3c8997e3f5681c50f819a5d3ea357dbe3e154666afff4be4',\n\tTimestamp: '1554284563581924',\n\tVersion: 1\n}\n```"
},
{
"alpha_fraction": 0.6666337847709656,
"alphanum_fraction": 0.676885187625885,
"avg_line_length": 35.496402740478516,
"blob_id": "3c0cc9a132ac2dd5e23f250ac57cdfa71b291bac",
"content_id": "7f427205dcbfe2912a650aac0b0574f8c7a89e97",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 11013,
"license_type": "no_license",
"max_line_length": 139,
"num_lines": 278,
"path": "/scripts/getdata.py",
"repo_name": "COLD-PLAY/ZilliqaGraphQL",
"src_encoding": "UTF-8",
"text": "__author__ = \"ZhouLiao\"\nimport requests, execjs, time\nfrom py2neo import Graph, Node, Relationship, NodeMatcher, RelationshipMatcher\nfrom func_timeout import *\nfrom pyzil.account import Account\nfrom pyzil.crypto.zilkey import is_bech32_address\n\ndata = {\n\t\"id\": \"1\",\n\t\"jsonrpc\": \"2.0\",\n\t\"method\": \"\",\n\t\"params\": [\"\"]\n}\n\nclass GetData(object):\n\tdef __init__(self, uri, user, pwd, curTxBlockNum):\n\t\tself.graph = Graph(uri, username=user, password=pwd)\n\t\tself.matcher_node = NodeMatcher(self.graph)\n\t\tself.matcher_relation = RelationshipMatcher(self.graph)\n\t\tself.curTxBlockNum = curTxBlockNum\n\t\t# self.graph.delete_all()\n\t\tself.jsCode = '''\n\t\t\tvar hashjs = require('hash.js');\n\t\t\tfunction getAddress(pubKey) {\n\t\t\t\treturn hashjs.sha256().update(pubKey, 'hex').digest('hex').slice(24);\n\t\t\t}\n\t\t'''\n\t\tself.eval = execjs.compile(self.jsCode)\n\t\n\t# 输出至控制台以及log 文件中\n\tdef printf(self, message):\n\t\t# print(message)\n\t\twith open(\"../log/%s.log\" % time.strftime('%Y-%m-%d',time.localtime(time.time())), \"a\") as file:\n\t\t\tfile.write(message + '\\r')\n\n\tdef getAddress(self, address, type_):\n\t\tif type_ == \"pubKey\":\n\t\t\told_address = self.eval.call(\"getAddress\", address[2:])\n\t\t\taccount = Account(address=old_address)\n\t\t\treturn (old_address, account.bech32_address)\n\t\telif type_ == \"toBech32\":\n\t\t\tif is_bech32_address(address): # no need to transfer to bech32 format\n\t\t\t\treturn (address, address)\n\t\t\taccount = Account(address=address)\n\t\t\treturn (address, account.bech32_address)\n\n\tdef getResult(self, method, params):\n\t\tdata[\"method\"] = method\n\t\tdata[\"params\"] = [params]\n\t\ttry:\n\t\t\tres = requests.post(\"https://api.zilliqa.com/\", json=data, timeout=10).json()\n\t\t\treturn res[\"result\"] if \"result\" in res else res\n\t\texcept requests.exceptions.RequestException as e:\n\t\t\tself.printf(\"ERROR: Api Connect Timeout: Recall Api!\")\n\t\t\treturn self.getResult(method, params)\n\n\tdef getTxBlcokData(self, startBlock, endBlock):\n\t\tfor i in range(startBlock, endBlock+1):\n\t\t\tself.getOneTxBlcokData(str(i))\n\n\tdef getOneTxBlcokData(self, blockNum):\n\t\tself.printf(\"the %s TxBlock %s\" % (blockNum, time.strftime(\"%y-%m-%d %H:%M:%S\", time.localtime())))\n\t\t# 若该Block存在,则删除\n\t\ttxBlock = self.matcher_node.match(\"TxBlock\", BlockNum=blockNum).first()\n\t\tif txBlock:\n\t\t\tself.printf(\"TxBlock \\t%s existed!\" % blockNum)\n\t\t\tself.graph.delete(txBlock)\n\n\t\tres = self.getResult(\"GetTxBlock\", blockNum)\n\t\twhile \"header\" not in res or res[\"header\"][\"Timestamp\"] == \"0\":\n\t\t\t# 表示到达最新的一个块,等一分钟再继续更新\n\t\t\tself.printf(\"Waiting for Next Block %s\" % time.strftime(\"%y-%m-%d %H:%M:%S\", time.localtime()))\n\t\t\ttime.sleep(60)\n\t\t\tres = self.getResult(\"GetTxBlock\", blockNum)\n\t\t# 创建TxBlock节点以及与所属DSBlock之间的从属关系\n\t\tTxBlock = Node(\"TxBlock\")\n\t\tTxBlock.update({\"HeaderSign\": res[\"body\"][\"HeaderSign\"]})\n\t\tTxBlock.update(res[\"header\"])\n\t\t# 由于一个TxBlock块由多个MicroBlock组成\n\t\tfor index, block in enumerate(res[\"body\"][\"MicroBlockInfos\"]):\n\t\t\tTxBlock.update({\"MicroBlock%dHash\" % index: block[\"MicroBlockHash\"], \"MicroBlock%dTxnRootHash\" % index: block[\"MicroBlockTxnRootHash\"]})\n\t\tDsBlock = self.matcher_node.match(\"DsBlock\", BlockNum=res[\"header\"][\"DSBlockNum\"]).first()\n\t\t# 更新当前TxBlock 所对应的DsBlock\n\t\tif not DsBlock:\n\t\t\tDsBlock = self.getOneDsBlcokData(res[\"header\"][\"DSBlockNum\"])\n\t\tTx2Ds = Relationship(TxBlock, \"in\", DsBlock)\n\t\tDs2Tx = Relationship(DsBlock, \"has\", TxBlock)\n\t\t##########################################################\n\t\t# 为has 和in 属性添加排序信息,即TxBlock 是DSBlock 中的第几个\n\t\t# cur_number = len(self.matcher_relation.match([DsBlock], \"has\"))\n\t\torder = int(blockNum) % 100\n\t\tTx2Ds[\"order\"] = order+1\n\t\tDs2Tx[\"order\"] = order+1\n\n\t\t# 创建Miner节点及其与TxBlock节点之间的挖与被挖关系\n\t\tminer_addr_old, miner_addr_new = self.getAddress(res[\"header\"][\"MinerPubKey\"], type_=\"pubKey\")\n\t\tMiner, existed = self.getAccountData(miner_addr_old, miner_addr_new)\n\t\tMiner2Tx = Relationship(Miner, \"mine\", TxBlock)\n\t\tTx2Miner = Relationship(TxBlock, \"mined\", Miner)\n\n\t\tself.graph.create(TxBlock)\n\t\tself.graph.create(Tx2Ds)\n\t\tself.graph.create(Ds2Tx)\n\t\tif not existed:\n\t\t\tself.graph.create(Miner)\n\t\tself.graph.create(Miner2Tx)\n\t\tself.graph.create(Tx2Miner)\n\n\t\t# 当TxBlock中有交易时,获取交易信息以及交易双方的账户信息并存入Neo4j中\n\t\tif res[\"header\"][\"NumTxns\"]:\n\t\t\tself.printf(\"%d Txs in all\" % res[\"header\"][\"NumTxns\"])\n\t\t\tself.getTxData(res[\"header\"][\"BlockNum\"])\n\n\tdef getDsBlockData(self, startBlock, endBlock):\n\t\tfor i in range(startBlock, endBlock+1):\n\t\t\tself.getOneDsBlcokData(str(i))\n\n\t# 返回DsBlock 节点\n\tdef getOneDsBlcokData(self, blockNum):\n\t\tself.printf(\"the %s DsBlock %s\" % (blockNum, time.strftime(\"%y-%m-%d %H:%M:%S\", time.localtime())))\n\t\tres = self.getResult(\"GetDsBlock\", blockNum)\n\t\t# 创建DSBlock节点\n\t\tDsBlock = Node(\"DsBlock\")\n\t\tDsBlock.update(res[\"header\"])\n\t\tDsBlock.update({\"signature\": res[\"signature\"]})\n\n\t\t# 创建Miner节点及其与DsBlock节点之间的挖与被挖关系\n\t\tminer_addr_old, miner_addr_new = self.getAddress(res[\"header\"][\"LeaderPubKey\"], type_=\"pubKey\")\n\t\tMiner, existed = self.getAccountData(miner_addr_old, miner_addr_new)\n\t\tMiner2Ds = Relationship(Miner, \"mine\", DsBlock)\n\t\tDs2Miner = Relationship(DsBlock, \"mined\", Miner)\n\n\t\tself.graph.create(DsBlock)\n\t\tif not existed:\n\t\t\tself.graph.create(Miner)\n\t\tself.graph.create(Miner2Ds)\n\t\tself.graph.create(Ds2Miner)\n\t\treturn self.matcher_node.match(\"DsBlock\", BlockNum=blockNum).first()\n\t\n\tdef getTxData(self, txBlockNum):\n\t\tres, order = self.getResult(\"GetTransactionsForTxBlock\", txBlockNum), 1\n\t\twhile \"error\" in res:\n\t\t\tself.printf(\"ERROR: get microblock failed! recatch txs!\" + res[\"error\"][\"message\"])\n\t\t\tres = self.getResult(\"GetTransactionsForTxBlock\", txBlockNum)\n\t\tfor MicroBlock in res:\n\t\t\tif not MicroBlock:\n\t\t\t\tcontinue\n\t\t\tfor txHash in MicroBlock:\n\t\t\t\tself.getOneTxData(txBlockNum, txHash, order)\n\t\t\t\torder += 1\n\t\n\t# 获取一条交易的信息\n\t@func_set_timeout(60)\n\tdef getOneTxData(self, txBlockNum, txHash, order):\n\t\tself.printf(\"The %s transaction %s\" % (txHash, time.strftime(\"%y-%m-%d %H:%M:%S\", time.localtime())))\n\t\tTx = self.matcher_node.match(\"Tx\", ID=txHash).first()\n\t\tif Tx:\n\t\t\tself.printf(\"Tx %s existed!\" % txHash)\n\t\t\t# Tx 已存在时,仍需建立TxBlock 与Tx 之间的关系\n\t\t\tTxBlock = self.matcher_node.match(\"TxBlock\", BlockNum=txBlockNum).first()\n\t\t\tTx2TxBlock = Relationship(Tx, \"in\", TxBlock)\n\t\t\tTxBlock2Tx = Relationship(TxBlock, \"has\", Tx)\n\t\t\t##########################################################\n\t\t\t# 为has 和in 属性添加排序信息,即Tx 是TxBlock 中的第几个\n\t\t\tTx2TxBlock[\"order\"] = order\n\t\t\tTxBlock2Tx[\"order\"] = order\n\t\t\tself.graph.create(Tx2TxBlock)\n\t\t\tself.graph.create(TxBlock2Tx)\n\t\t\treturn\n\n\t\tres_ = self.getResult(\"GetTransaction\", txHash)\n\t\t# 获取交易信息时出错\n\t\t############################################# 待修改\n\t\tif \"error\" in res_:\n\t\t\tself.printf(\"ERROR: get txn info failed!\" + res_[\"error\"][\"message\"])\n\t\t\treturn -1\n\t\t\t\n\t\t# 解决调用api 时可能产生的问题\n\t\twhile \"receipt\" not in res_:\n\t\t\tres_ = self.getResult(\"GetTransaction\", txHash)\n\n\t\t# receipt不符合格式 重新规整一下\n\t\tres_[\"cumulative_gas\"] = res_[\"receipt\"][\"cumulative_gas\"]\n\t\tres_[\"epoch_num\"] = res_[\"receipt\"][\"epoch_num\"]\n\t\tres_[\"success\"] = str(res_[\"receipt\"][\"success\"])\n\t\tres_.pop(\"receipt\")\n\n\t\tfrom_addr_old, from_addr_new = self.getAddress(res_[\"senderPubKey\"], type_=\"pubKey\")\n\t\tto_addr_old, to_addr_new = self.getAddress(res_[\"toAddr\"], type_=\"toBech32\")\n\n\t\t# 创建Tx的节点以及与所属TxBlock之间的从属关系\n\t\tTx = Node(\"Tx\")\n\t\tTx.update(res_)\n\t\t# 由于res_[\"senderPubKey\"]不是直接的地址,需要转换一下再将发送者的地址存储,新增bech32 格式地址存储\n\t\tTx.update({\"fromAddrNew\": from_addr_new, \"toAddrNew\": to_addr_new})\n\t\t\n\t\tTxBlock = self.matcher_node.match(\"TxBlock\", BlockNum=txBlockNum).first()\n\t\tTx2TxBlock = Relationship(Tx, \"in\", TxBlock)\n\t\tTxBlock2Tx = Relationship(TxBlock, \"has\", Tx)\n\t\t##########################################################\n\t\t# 为has 和in 属性添加排序信息,即Tx 是TxBlock 中的第几个\n\t\tTx2TxBlock[\"order\"] = order\n\t\tTxBlock2Tx[\"order\"] = order\n\n\t\t# 创建账户节点以及所参与Tx之间的发送接收关系,其中返回中\n\t\t# 有一个标记信息,表示数据库中是否已经存在该节点\n\t\tFrom_Account, existed_from = self.getAccountData(from_addr_old, from_addr_new)\n\t\tTo_Account, existed_to = self.getAccountData(to_addr_old, to_addr_new)\n\t\tFrom2Tx = Relationship(From_Account, \"send\", Tx)\n\t\tTx2From = Relationship(Tx, \"from\", From_Account)\n\t\tTo2Tx = Relationship(To_Account, \"receive\", Tx)\n\t\tTx2To = Relationship(Tx, \"to\", To_Account)\n\n\t\tFrom2To = Relationship(From_Account, \"traded\", To_Account)\n\t\t###################################################\n\t\t# 之前便加过TxHash\n\t\tFrom2To['TxHash'] = txHash\n\t\tTo2From = Relationship(To_Account, \"traded\", From_Account)\n\t\tTo2From['TxHash'] = txHash\n\n\t\tself.graph.create(Tx)\n\t\tself.graph.create(Tx2TxBlock)\n\t\tself.graph.create(TxBlock2Tx)\n\t\t# 若没在数据库中\n\t\tif not existed_from:\n\t\t\tself.graph.create(From_Account)\n\t\tif not existed_to:\n\t\t\tself.graph.create(To_Account)\n\t\tself.graph.create(From2Tx)\n\t\tself.graph.create(Tx2From)\n\t\tself.graph.create(To2Tx)\n\t\tself.graph.create(Tx2To)\n\t\tself.graph.create(From2To)\n\t\tself.graph.create(To2From)\n\n\tdef getAccountData(self, address_old, address_new):\n\t\t# 当账户已经在数据库中时:\n\t\tAccount = self.matcher_node.match(\"Account\", address=address_old).first()\n\t\tres = self.getResult(\"GetBalance\", address_old)\n\t\tif Account:\n\t\t\t# self.printf(\"account %s existed!\" % address)\n\t\t\tif \"error\" in res:\n\t\t\t\tAccount.update({\"balance\": \"0\", \"existed\": \"false\"})\n\t\t\telse:\n\t\t\t\tAccount.update({\"balance\": res[\"balance\"], \"existed\": \"true\"})\n\t\t\tself.graph.push(Account)\n\t\t\treturn Account, 1\n\t\tAccount = Node(\"Account\", address=address_old)\n\t\tAccount.update({\"address_new\": address_new})\n\t\t# 获取账户当前的余额\n\t\tif \"error\" in res:\n\t\t\tAccount.update({\"balance\": \"0\", \"existed\": \"false\"})\n\t\telse:\n\t\t\tAccount.update({\"balance\": res[\"balance\"], \"existed\": \"true\"})\n\t\treturn Account, 0\n\n\tdef run(self):\n\t\t# 先对DSBlock进行存储\n\t\t# self.getDsBlockData()\n\t\t# 在对TxBlock存储的过程中将所有的交易信息\n\t\t# 以及交易过程中的账户信息一并存储到数据库\n\t\tcurTxBlock = self.matcher_node.match(\"TxBlock\", BlockNum=str(self.curTxBlockNum)).first()\n\t\tif curTxBlock: self.graph.delete(curTxBlock)\n\t\ttry:\n\t\t\twhile True:\n\t\t\t\tself.getOneTxBlcokData(str(self.curTxBlockNum))\n\t\t\t\tself.curTxBlockNum += 1\n\t\texcept FunctionTimedOut:\n\t\t\tself.printf(\"ERROR: Running Timeout: Restart!\")\n\t\t\tself.run()\n\t\texcept Exception as e:\n\t\t\tself.printf(\"ERROR: %s: Restart!\" % str(e))\n\t\t\tself.run()\n\nif __name__ == \"__main__\":\n\turi, user, pwd, curTxBlockNum = \"http://localhost:7474\", \"neo4j\", \"liaozhou1998\", 320752\n\tGeter = GetData(uri, user, pwd, curTxBlockNum)\n\tGeter.run()"
}
] | 11 |
sapphire008/Python
|
https://github.com/sapphire008/Python
|
15d3d7885ac82333654b6729c2a57ed760e796a8
|
b2783eabb1987091051614b8f12a4778e158a90b
|
dbf48e804e1792999854832e64a7dae9f42f71e2
|
refs/heads/master
| 2023-08-09T04:38:43.077285 | 2023-07-28T18:36:03 | 2023-07-28T18:36:03 | 9,880,648 | 15 | 7 | null | null | null | null | null |
[
{
"alpha_fraction": 0.37699466943740845,
"alphanum_fraction": 0.4468750059604645,
"avg_line_length": 40.849002838134766,
"blob_id": "293add9e95e16cb9bd98082f99596d02f3279412",
"content_id": "aecfdd5f092de46acd3111235e9d11f095c2f857",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 15044,
"license_type": "no_license",
"max_line_length": 78,
"num_lines": 351,
"path": "/generic/locale.py",
"repo_name": "sapphire008/Python",
"src_encoding": "UTF-8",
"text": "#!/usr/bin/env python\r\n# -*- coding: utf-8 -*-\r\n#\r\n# Copyright (C) 2007-2008 Dieter Verfaillie <[email protected]>\r\n# Copyright 2009-2010 Zuza Software Foundation\r\n# Copyright 2013-2014 F Wolff\r\n#\r\n# (NOTE: LGPL)\r\n# This library is free software; you can redistribute it and/or\r\n# modify it under the terms of the GNU Lesser General Public\r\n# License as published by the Free Software Foundation; either\r\n# version 2 of the License, or (at your option) any later version.\r\n#\r\n# This library is distributed in the hope that it will be useful,\r\n# but WITHOUT ANY WARRANTY; without even the implied warranty of\r\n# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU\r\n# Lesser General Public License for more details.\r\n#\r\n# You should have received a copy of the GNU Lesser General Public\r\n# License along with this library; If not, see <http://www.gnu.org/licenses/>.\r\n\r\n\r\nimport os\r\nimport sys\r\n\r\n\r\ndef _isofromlangid(langid):\r\n # ISO 639-1\r\n # http://www.loc.gov/standards/iso639-2/\r\n # List of existing mui packs:\r\n # http://www.microsoft.com/globaldev/reference/win2k/setup/Langid.mspx\r\n # List of known id's\r\n # http://www.microsoft.com/globaldev/reference/lcid-all.mspx\r\n\r\n lcid = {1078: 'af', # Afrikaans - South Africa\r\n 1052: 'sq', # Albanian - Albania\r\n #1156: 'gsw', # Alsatian\r\n 1118: 'am', # Amharic - Ethiopia\r\n 1025: 'ar', # Arabic - Saudi Arabia\r\n 5121: 'ar', # Arabic - Algeria\r\n 15361: 'ar', # Arabic - Bahrain\r\n 3073: 'ar', # Arabic - Egypt\r\n 2049: 'ar', # Arabic - Iraq\r\n 11265: 'ar', # Arabic - Jordan\r\n 13313: 'ar', # Arabic - Kuwait\r\n 12289: 'ar', # Arabic - Lebanon\r\n 4097: 'ar', # Arabic - Libya\r\n 6145: 'ar', # Arabic - Morocco\r\n 8193: 'ar', # Arabic - Oman\r\n 16385: 'ar', # Arabic - Qatar\r\n 10241: 'ar', # Arabic - Syria\r\n 7169: 'ar', # Arabic - Tunisia\r\n 14337: 'ar', # Arabic - U.A.E.\r\n 9217: 'ar', # Arabic - Yemen\r\n 1067: 'hy', # Armenian - Armenia\r\n 1101: 'as', # Assamese\r\n 2092: 'az', # Azeri (Cyrillic)\r\n 1068: 'az', # Azeri (Latin)\r\n 1133: 'ba', # Bashkir\r\n 1069: 'eu', # Basque\r\n 1059: 'be', # Belarusian\r\n 1093: 'bn_IN', # Bengali (India)\r\n 2117: 'bn', # Bengali (Bangladesh)\r\n 5146: 'bs', # Bosnian (Bosnia/Herzegovina)\r\n 1150: 'br', # Breton\r\n 1026: 'bg', # Bulgarian\r\n 1109: 'my', # Burmese\r\n 1027: 'ca', # Catalan\r\n 1116: 'chr', # Cherokee - United States\r\n 2052: 'zh_CN', # Chinese - People's Republic of China\r\n 4100: 'zh', # Chinese - Singapore\r\n 1028: 'zh_TW', # Chinese - Taiwan\r\n 3076: 'zh_HK', # Chinese - Hong Kong SAR\r\n 5124: 'zh', # Chinese - Macao SAR\r\n 1155: 'co', # Corsican\r\n 1050: 'hr', # Croatian\r\n 4122: 'hr', # Croatian (Bosnia/Herzegovina)\r\n 1029: 'cs', # Czech\r\n 1030: 'da', # Danish\r\n #1164: 'fa_AF' # Dari\r\n 1125: 'dv', # Divehi\r\n 1043: 'nl', # Dutch - Netherlands\r\n 2067: 'nl', # Dutch - Belgium\r\n 1126: 'bin', # Edo\r\n 1033: 'en', # English - United States\r\n 2057: 'en_UK', # English - United Kingdom\r\n 3081: 'en', # English - Australia\r\n 10249: 'en', # English - Belize\r\n 4105: 'en_CA', # English - Canada\r\n 9225: 'en', # English - Caribbean\r\n 15369: 'en', # English - Hong Kong SAR\r\n 16393: 'en', # English - India\r\n 14345: 'en', # English - Indonesia\r\n 6153: 'en', # English - Ireland\r\n 8201: 'en', # English - Jamaica\r\n 17417: 'en', # English - Malaysia\r\n 5129: 'en', # English - New Zealand\r\n 13321: 'en', # English - Philippines\r\n 18441: 'en', # English - Singapore\r\n 7177: 'en_ZA', # English - South Africa\r\n 11273: 'en', # English - Trinidad\r\n 12297: 'en', # English - Zimbabwe\r\n 1061: 'et', # Estonian\r\n 1080: 'fo', # Faroese\r\n 1065: 'fa', # Persian\r\n 1124: 'fil', # Filipino #XXX: GTK uses Tagalog (tl)\r\n 1035: 'fi', # Finnish\r\n 1036: 'fr', # French - France\r\n 2060: 'fr', # French - Belgium\r\n 11276: 'fr', # French - Cameroon\r\n 3084: 'fr', # French - Canada\r\n 9228: 'fr', # French - Democratic Rep. of Congo\r\n 12300: 'fr', # French - Cote d'Ivoire\r\n 15372: 'fr', # French - Haiti\r\n 5132: 'fr', # French - Luxembourg\r\n 13324: 'fr', # French - Mali\r\n 6156: 'fr', # French - Monaco\r\n 14348: 'fr', # French - Morocco\r\n 58380: 'fr', # French - North Africa\r\n 8204: 'fr', # French - Reunion\r\n 10252: 'fr', # French - Senegal\r\n 4108: 'fr', # French - Switzerland\r\n 7180: 'fr', # French - West Indies\r\n 1122: 'fy', # Frisian - Netherlands\r\n 1127: 'ff', # Fulfulde - Nigeria\r\n 1071: 'mk', # FYRO Macedonian\r\n 2108: 'ga', # Gaelic (Ireland)\r\n 1084: 'gd', # Gaelic (Scotland)\r\n 1110: 'gl', # Galician\r\n 1079: 'ka', # Georgian\r\n 1031: 'de', # German - Germany\r\n 3079: 'de', # German - Austria\r\n 5127: 'de', # German - Liechtenstein\r\n 4103: 'de', # German - Luxembourg\r\n 2055: 'de', # German - Switzerland\r\n 1032: 'el', # Greek\r\n 1135: 'kl', # Greenlandic\r\n 1140: 'gn', # Guarani - Paraguay\r\n 1095: 'gu', # Gujarati\r\n 1128: 'ha', # Hausa - Nigeria\r\n 1141: 'haw', # Hawaiian - United States\r\n 1037: 'he', # Hebrew\r\n 1081: 'hi', # Hindi\r\n 1038: 'hu', # Hungarian\r\n 1129: 'ibb', # Ibibio - Nigeria\r\n 1039: 'is', # Icelandic\r\n 1136: 'ig', # Igbo - Nigeria\r\n 1057: 'id', # Indonesian\r\n 1117: 'iu', # Inuktitut\r\n 1040: 'it', # Italian - Italy\r\n 2064: 'it', # Italian - Switzerland\r\n 1041: 'ja', # Japanese\r\n 1158: 'quc', # K'iche\r\n 1099: 'kn', # Kannada\r\n 1137: 'kr', # Kanuri - Nigeria\r\n 2144: 'ks', # Kashmiri\r\n 1120: 'ks', # Kashmiri (Arabic)\r\n 1087: 'kk', # Kazakh\r\n 1107: 'km', # Khmer\r\n 1159: 'rw', # Kinyarwanda\r\n 1111: 'knn', # Konkani\r\n 1042: 'ko', # Korean\r\n 1088: 'ky', # Kyrgyz (Cyrillic)\r\n 1108: 'lo', # Lao\r\n 1142: 'la', # Latin\r\n 1062: 'lv', # Latvian\r\n 1063: 'lt', # Lithuanian\r\n 1134: 'lb', # Luxembourgish\r\n 1086: 'ms', # Malay - Malaysia\r\n 2110: 'ms', # Malay - Brunei Darussalam\r\n 1100: 'ml', # Malayalam\r\n 1082: 'mt', # Maltese\r\n 1112: 'mni', # Manipuri\r\n 1153: 'mi', # Maori - New Zealand\r\n 1146: 'arn', # Mapudungun\r\n 1102: 'mr', # Marathi\r\n 1148: 'moh', # Mohawk\r\n 1104: 'mn', # Mongolian (Cyrillic)\r\n 2128: 'mn', # Mongolian (Mongolian)\r\n 1121: 'ne', # Nepali\r\n 2145: 'ne', # Nepali - India\r\n 1044: 'no', # Norwegian (Bokmᅢᆬl)\r\n 2068: 'no', # Norwegian (Nynorsk)\r\n 1154: 'oc', # Occitan\r\n 1096: 'or', # Oriya\r\n 1138: 'om', # Oromo\r\n 1145: 'pap', # Papiamentu\r\n 1123: 'ps', # Pashto\r\n 1045: 'pl', # Polish\r\n 1046: 'pt_BR', # Portuguese - Brazil\r\n 2070: 'pt', # Portuguese - Portugal\r\n 1094: 'pa', # Punjabi\r\n 2118: 'pa', # Punjabi (Pakistan)\r\n 1131: 'qu', # Quecha - Bolivia\r\n 2155: 'qu', # Quecha - Ecuador\r\n 3179: 'qu', # Quecha - Peru\r\n 1047: 'rm', # Rhaeto-Romanic\r\n 1048: 'ro', # Romanian\r\n 2072: 'ro', # Romanian - Moldava\r\n 1049: 'ru', # Russian\r\n 2073: 'ru', # Russian - Moldava\r\n 1083: None, # Sami (Lappish)\r\n 1103: 'sa', # Sanskrit\r\n 1132: 'nso', # Northern Sotho\r\n 3098: 'sr', # Serbian (Cyrillic)\r\n 2074: 'sr@latin',# Serbian (Latin)\r\n 1113: 'sd', # Sindhi - India\r\n 2137: 'sd', # Sindhi - Pakistan\r\n 1115: 'si', # Sinhalese - Sri Lanka\r\n 1051: 'sk', # Slovak\r\n 1060: 'sl', # Slovenian\r\n 1143: 'so', # Somali\r\n 1070: None, # Sorbian\r\n 3082: 'es', # Spanish - Spain (Modern Sort)\r\n 1034: 'es', # Spanish - Spain (Traditional Sort)\r\n 11274: 'es', # Spanish - Argentina\r\n 16394: 'es', # Spanish - Bolivia\r\n 13322: 'es', # Spanish - Chile\r\n 9226: 'es', # Spanish - Colombia\r\n 5130: 'es', # Spanish - Costa Rica\r\n 7178: 'es', # Spanish - Dominican Republic\r\n 12298: 'es', # Spanish - Ecuador\r\n 17418: 'es', # Spanish - El Salvador\r\n 4106: 'es', # Spanish - Guatemala\r\n 18442: 'es', # Spanish - Honduras\r\n 58378: 'es', # Spanish - Latin America\r\n 2058: 'es', # Spanish - Mexico\r\n 19466: 'es', # Spanish - Nicaragua\r\n 6154: 'es', # Spanish - Panama\r\n 15370: 'es', # Spanish - Paraguay\r\n 10250: 'es', # Spanish - Peru\r\n 20490: 'es', # Spanish - Puerto Rico\r\n 21514: 'es', # Spanish - United States\r\n 14346: 'es', # Spanish - Uruguay\r\n 8202: 'es', # Spanish - Venezuela\r\n 1072: 'st', # Sutu\r\n 1089: 'sw', # Swahili\r\n 1053: 'sv', # Swedish\r\n 2077: 'sv', # Swedish - Finland\r\n 1114: 'syc', # Syriac\r\n 1064: 'tg', # Tajik\r\n 1119: None, # Tamazight (Arabic)\r\n 2143: None, # Tamazight (Latin)\r\n 1097: 'ta', # Tamil\r\n 1092: 'tt', # Tatar\r\n 1098: 'te', # Telugu\r\n 1054: 'th', # Thai\r\n 2129: 'bo', # Tibetan - Bhutan\r\n 1105: 'bo', # Tibetan - People's Republic of China\r\n 2163: 'ti', # Tigrigna - Eritrea\r\n 1139: 'ti', # Tigrigna - Ethiopia\r\n 1073: 'ts', # Tsonga\r\n 1074: 'tn', # Tswana\r\n 1055: 'tr', # Turkish\r\n 1090: 'tk', # Turkmen\r\n 1152: 'ug', # Uighur - China\r\n 1058: 'uk', # Ukrainian\r\n 1056: 'ur', # Urdu\r\n 2080: 'ur', # Urdu - India\r\n 2115: 'uz@cyrillic', # Uzbek (Cyrillic)\r\n 1091: 'uz', # Uzbek (Latin)\r\n 1075: 've', # Venda\r\n 1066: 'vi', # Vietnamese\r\n 1106: 'cy', # Welsh\r\n 1160: 'wo', # Wolof\r\n 1076: 'xh', # Xhosa\r\n 1157: 'sah', # Yakut\r\n 1144: 'ii', # Yi\r\n 1085: 'yi', # Yiddish\r\n 1130: 'yo', # Yoruba\r\n 1077: 'zu', # Zulu\r\n }\r\n\r\n return lcid.get(langid, None)\r\n\r\n\r\ndef get_win32_lang(system_ui=False):\r\n \"\"\"Return the locale for the user (default) or the system UI.\"\"\"\r\n # This supports windows MUI language packs and will return\r\n # the windows installation language if not available or\r\n # if the language has not been changed by the user.\r\n # Works on win2k and up.\r\n from ctypes import windll\r\n if system_ui:\r\n #Windows UI language\r\n langid = windll.kernel32.GetUserDefaultUILanguage()\r\n else:\r\n #User's locale\r\n langid = windll.kernel32.GetUserDefaultLangID()\r\n if not langid == 0:\r\n lang = _isofromlangid(langid) or 'C'\r\n else:\r\n lang = 'C'\r\n\r\n return lang\r\n\r\n\r\ndef _getlang():\r\n # Environment always overrides this for debugging purposes.\r\n lang = os.getenv('LANG') or get_win32_lang()\r\n return lang\r\n\r\n\r\ndef _putenv(name, value):\r\n # From python 2.4 on, os.environ changes only\r\n # work within python and no longer apply to low level\r\n # C stuff on win32. Let's force LANG so it works with\r\n # gtk+ etc\r\n from ctypes import windll\r\n kernel32 = windll.kernel32\r\n result = kernel32.SetEnvironmentVariableW(name, value)\r\n del kernel32\r\n if result == 0:\r\n raise\r\n\r\n from ctypes import cdll\r\n msvcrt = cdll.msvcrt\r\n result = msvcrt._putenv('%s=%s' % (name, value))\r\n del msvcrt\r\n\r\n\r\ndef fix_locale(lang=None):\r\n \"\"\"This fixes some strange issues to ensure locale and gettext works\r\n correctly, also within glade, even with a non-default locale passed as\r\n parameter.\"\"\"\r\n if sys.platform == 'win32':\r\n lang = lang or _getlang()\r\n\r\n _putenv('LANGUAGE', lang)\r\n\r\n os.environ['LANG'] = lang\r\n _putenv('LANG', lang)\r\n\r\n os.environ['LC_ALL'] = lang\r\n _putenv('LC_ALL', lang)\r\n if lang:\r\n # This is to support a non-locale UI language:\r\n os.environ['LANGUAGE'] = lang\r\n\r\n\r\ndef fix_libintl(main_dir):\r\n \"\"\"Bind gettext in the libintl since the gettext package doesn't.\"\"\"\r\n # See https://bugzilla.gnome.org/show_bug.cgi?id=574520\r\n from ctypes import cdll\r\n libintl = cdll.intl\r\n # we need main_dir in the filesystem encoding:\r\n main_dir = main_dir.encode(sys.getfilesystemencoding())\r\n locale_dir = os.path.join(main_dir, \"share\", \"locale\")\r\n libintl.bindtextdomain(\"virtaal\", locale_dir)\r\n libintl.bind_textdomain_codeset(\"virtaal\", 'UTF-8')\r\n del libintl\r\n"
},
{
"alpha_fraction": 0.30450546741485596,
"alphanum_fraction": 0.5400618314743042,
"avg_line_length": 71.47598266601562,
"blob_id": "3f2deb326117602621b7a027ae27e8782e3b01b2",
"content_id": "a9866886bdfdadb812d227b291e909b80d1d8256",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 16824,
"license_type": "no_license",
"max_line_length": 632,
"num_lines": 229,
"path": "/Plots/simple/colorbasket.py",
"repo_name": "sapphire008/Python",
"src_encoding": "UTF-8",
"text": "# -*- coding: utf-8 -*-\r\n\"\"\"\r\nCreated on Wed Jul 01 22:19:17 2015\r\n\r\nReturns additional custom color palette\r\n\r\n@author: Edward\r\n\"\"\"\r\n\r\nfrom collections import OrderedDict\r\n\r\ndef rgb2hex(c):\r\n \"\"\"Convert from RGB triplets to hex string\"\"\"\r\n return('#{:02x}{:02x}{:02x}'.format(c[0], c[1], c[2]))\r\n\r\ndef hex2rgb(v):\r\n \"\"\"Convert a hex string to rgb tuple triplet\"\"\"\r\n v = v.lstrip('#')\r\n lv = len(v)\r\n return(tuple(int(v[i:i+lv/3], 16) for i in range(0, lv, lv/3)))\r\n\r\ndef rgb2cmyk(rgb,cmyk_scale = 1.0):\r\n if tuple(rgb)==(0.,0.,0.):\r\n # black\r\n return(0., 0., 0., cmyk_scale)\r\n\r\n # rgb [0,255] -> cmy [0,1]\r\n c,m,y = [1.0-a/255.0 for a in rgb]\r\n # extract out k [0,1]\r\n k = min(c, m, y)\r\n c,m,y = [(a - k)/(1.0-k) for a in (c,m,y)]\r\n # rescale to the range [0,cmyk_scale]\r\n return(c*cmyk_scale, m*cmyk_scale, y*cmyk_scale, k*cmyk_scale)\r\n\r\ndef rgbdecimal2int(c):\r\n \"\"\" scale RGB value from [0, 1] to [0, 255]\"\"\"\r\n return((int(c[0]*255.999), int(c[1]*255.999), int(c[2]*255.999) ))\r\n\r\ndef rgbint2decimal(c):\r\n \"\"\"scale RGB value from [0, 255] to [0, 1]\"\"\"\r\n return(c[0]/255.0, c[1]/255.0, c[2]/255.0)\r\n\r\ndef printCSS(c):\r\n \"\"\"Print CSS array of colors for copy and paste into CSS script\"\"\"\r\n\r\n if type(c) == 'collections.OrderedDict':\r\n c = c.values()\r\n clist = \" \".join([\".Set .q%d-%d{fill.:rgb(%d,%d,%d)}\" %((n,len(c)-1)+x) for n,x in enumerate(c)])\r\n print(clist) #.Set3 .q0-8{fill:rgb(141,211,199)} ...\r\n return(clist)\r\n\r\ndef printJS(c):\r\n \"\"\"print JavaScript array of colors for copy and paste into JS script\"\"\"\r\n if type(c) == 'collections.OrderedDict':\r\n c = c.values()\r\n clist = \",\".join([\"'rgb(%d,%d,%d)'\"%(x) for x in c])\r\n clist = \"[\"+clist+\"]\"\r\n print(clist) # ['rgb(255,245,240)',...]\r\n return(clist)\r\n\r\ndef ColorBrewer(cname='PuBuGn'):\r\n \"\"\"http://colorbrewer2.org/, used by web designers and R's ggplot\"\"\"\r\n return({\r\n 'BuGn' : OrderedDict([(0, (247,252,253)),(1, (229,245,249)),(2, (204,236,230)),(3, (153,216,201)),(4, (102,194,164)),(5, (65,174,118)),(6, (35,139,69)),(7, (0,109,44)),(8, (0,68,27))]),\r\n 'BuPu' : OrderedDict([(0, (247,252,253)),(1, (224,236,244)),(2, (191,211,230)),(3, (158,188,218)),(4, (140,150,198)),(5, (140,107,177)),(6, (136,65,157)),(7, (129,15,124)),(8, (77,0,75))]),\r\n 'GnBu' : OrderedDict([(0, (247,252,240)),(1, (224,243,219)),(2, (204,235,197)),(3, (168,221,181)),(4, (123,204,196)),(5, (78,179,211)),(6, (43,140,190)),(7, (8,104,172)),(8, (8,64,129))]),\r\n 'OrRd' : OrderedDict([(0, (255,247,236)),(1, (254,232,200)),(2, (253,212,158)),(3, (253,187,132)),(4, (252,141,89)),(5, (239,101,72)),(6, (215,48,31)),(7, (179,0,0)),(8, (127,0,0))]),\r\n 'PuBu' : OrderedDict([(0, (255,247,251)),(1, (236,231,242)),(2, (208,209,230)),(3, (166,189,219)),(4, (116,169,207)),(5, (54,144,192)),(6, (5,112,176)),(7, (4,90,141)),(8, (2,56,88))]),\r\n 'PuBuGn' : OrderedDict([(0, (255,247,251)),(1, (236,226,240)),(2, (208,209,230)),(3, (166,189,219)),(4, (103,169,207)),(5, (54,144,192)),(6, (2,129,138)),(7, (1,108,89)),(8, (1,70,54))]),\r\n 'PuRd' : OrderedDict([(0, (247,244,249)),(1, (231,225,239)),(2, (212,185,218)),(3, (201,148,199)),(4, (223,101,176)),(5, (231,41,138)),(6, (206,18,86)),(7, (152,0,67)),(8, (103,0,31))]),\r\n 'RdPu' : OrderedDict([(0, (255,247,243)),(1, (253,224,221)),(2, (252,197,192)),(3, (250,159,181)),(4, (247,104,161)),(5, (221,52,151)),(6, (174,1,126)),(7, (122,1,119)),(8, (73,0,106))]),\r\n 'YlGn' : OrderedDict([(0, (255,255,229)),(1, (247,252,185)),(2, (217,240,163)),(3, (173,221,142)),(4, (120,198,121)),(5, (65,171,93)),(6, (35,132,67)),(7, (0,104,55)),(8, (0,69,41))]),\r\n 'YlGnBu' : OrderedDict([(0, (255,255,217)),(1, (237,248,177)),(2, (199,233,180)),(3, (127,205,187)),(4, (65,182,196)),(5, (29,145,192)),(6, (34,94,168)),(7, (37,52,148)),(8, (8,29,88))]),\r\n 'YlOrBr' : OrderedDict([(0, (255,255,229)),(1, (255,247,188)),(2, (254,227,145)),(3, (254,196,79)),(4, (254,153,41)),(5, (236,112,20)),(6, (204,76,2)),(7, (153,52,4)),(8, (102,37,6))]),\r\n 'YlOrRd' : OrderedDict([(0, (255,255,204)),(1, (255,237,160)),(2, (254,217,118)),(3, (254,178,76)),(4, (253,141,60)),(5, (252,78,42)),(6, (227,26,28)),(7, (189,0,38)),(8, (128,0,38))]),\r\n 'Blues' : OrderedDict([(0, (247,251,255)),(1, (222,235,247)),(2, (198,219,239)),(3, (158,202,225)),(4, (107,174,214)),(5, (66,146,198)),(6, (33,113,181)),(7, (8,81,156)),(8, (8,48,107))]),\r\n 'Greens' : OrderedDict([(0, (247,252,245)),(1, (229,245,224)),(2, (199,233,192)),(3, (161,217,155)),(4, (116,196,118)),(5, (65,171,93)),(6, (35,139,69)),(7, (0,109,44)),(8, (0,68,27))]),\r\n 'Greys' : OrderedDict([(0, (255,255,255)),(1, (240,240,240)),(2, (217,217,217)),(3, (189,189,189)),(4, (150,150,150)),(5, (115,115,115)),(6, (82,82,82)),(7, (37,37,37)),(8, (0,0,0))]),\r\n 'Oranges' : OrderedDict([(0, (255,245,235)),(1, (254,230,206)),(2, (253,208,162)),(3, (253,174,107)),(4, (253,141,60)),(5, (241,105,19)),(6, (217,72,1)),(7, (166,54,3)),(8, (127,39,4))]),\r\n 'Purples' : OrderedDict([(0, (252,251,253)),(1, (239,237,245)),(2, (218,218,235)),(3, (188,189,220)),(4, (158,154,200)),(5, (128,125,186)),(6, (106,81,163)),(7, (84,39,143)),(8, (63,0,125))]),\r\n 'Reds' : OrderedDict([(0, (255,245,240)),(1, (254,224,210)),(2, (252,187,161)),(3, (252,146,114)),(4, (251,106,74)),(5, (239,59,44)),(6, (203,24,29)),(7, (165,15,21)),(8, (103,0,13))]),\r\n 'BrBG' : OrderedDict([(0, (84,48,5)),(1, (140,81,10)),(2, (191,129,45)),(3, (223,194,125)),(4, (246,232,195)),(5, (245,245,245)),(6, (199,234,229)),(7, (128,205,193)),(8, (53,151,143)),(9, (1,102,94)),(10, (0,60,48))]),\r\n 'PiYG' : OrderedDict([(0, (142,1,82)),(1, (197,27,125)),(2, (222,119,174)),(3, (241,182,218)),(4, (253,224,239)),(5, (247,247,247)),(6, (230,245,208)),(7, (184,225,134)),(8, (127,188,65)),(9, (77,146,33)),(10, (39,100,25))]),\r\n 'PRGn' : OrderedDict([(0, (64,0,75)),(1, (118,42,131)),(2, (153,112,171)),(3, (194,165,207)),(4, (231,212,232)),(5, (247,247,247)),(6, (217,240,211)),(7, (166,219,160)),(8, (90,174,97)),(9, (27,120,55)),(10, (0,68,27))]),\r\n 'PuOr' : OrderedDict([(0, (127,59,8)),(1, (179,88,6)),(2, (224,130,20)),(3, (253,184,99)),(4, (254,224,182)),(5, (247,247,247)),(6, (216,218,235)),(7, (178,171,210)),(8, (128,115,172)),(9, (84,39,136)),(10, (45,0,75))]),\r\n 'RdBu' : OrderedDict([(0, (103,0,31)),(1, (178,24,43)),(2, (214,96,77)),(3, (244,165,130)),(4, (253,219,199)),(5, (247,247,247)),(6, (209,229,240)),(7, (146,197,222)),(8, (67,147,195)),(9, (33,102,172)),(10, (5,48,97))]),\r\n 'RdGy' : OrderedDict([(0, (103,0,31)),(1, (178,24,43)),(2, (214,96,77)),(3, (244,165,130)),(4, (253,219,199)),(5, (255,255,255)),(6, (224,224,224)),(7, (186,186,186)),(8, (135,135,135)),(9, (77,77,77)),(10, (26,26,26))]),\r\n 'RdYlBu' : OrderedDict([(0, (165,0,38)),(1, (215,48,39)),(2, (244,109,67)),(3, (253,174,97)),(4, (254,224,144)),(5, (255,255,191)),(6, (224,243,248)),(7, (171,217,233)),(8, (116,173,209)),(9, (69,117,180)),(10, (49,54,149))]),\r\n 'RdYlGn' : OrderedDict([(0, (165,0,38)),(1, (215,48,39)),(2, (244,109,67)),(3, (253,174,97)),(4, (254,224,139)),(5, (255,255,191)),(6, (217,239,139)),(7, (166,217,106)),(8, (102,189,99)),(9, (26,152,80)),(10, (0,104,55))]),\r\n 'Spectral' : OrderedDict([(0, (158,1,66)),(1, (213,62,79)),(2, (244,109,67)),(3, (253,174,97)),(4, (254,224,139)),(5, (255,255,191)),(6, (230,245,152)),(7, (171,221,164)),(8, (102,194,165)),(9, (50,136,189)),(10, (94,79,162))]),\r\n 'Paired' : OrderedDict([(0, (166,206,227)),(1, (31,120,180)),(2, (178,223,138)),(3, (51,160,44)),(4, (251,154,153)),(5, (227,26,28)),(6, (253,191,111)),(7, (255,127,0)),(8, (202,178,214)),(9, (106,61,154)),(10, (255,255,153)),(11, (177,89,40))]),\r\n 'Pastel1' : OrderedDict([(0, (251,180,174)),(1, (179,205,227)),(2, (204,235,197)),(3, (222,203,228)),(4, (254,217,166)),(5, (255,255,204)),(6, (229,216,189)),(7, (253,218,236)),(8, (242,242,242))]),\r\n 'Pastel2' : OrderedDict([(0, (179,226,205)),(1, (253,205,172)),(2, (203,213,232)),(3, (244,202,228)),(4, (230,245,201)),(5, (255,242,174)),(6, (241,226,204)),(7, (204,204,204))]),\r\n 'Set1' : OrderedDict([(0, (228,26,28)),(1, (55,126,184)),(2, (77,175,74)),(3, (152,78,163)),(4, (255,127,0)),(5, (255,255,51)),(6, (166,86,40)),(7, (247,129,191)),(8, (153,153,153))]),\r\n 'Set2' : OrderedDict([(0, (102,194,165)),(1, (252,141,98)),(2, (141,160,203)),(3, (231,138,195)),(4, (166,216,84)),(5, (255,217,47)),(6, (229,196,148)),(7, (179,179,179))]),\r\n 'Set3' : OrderedDict([(0, (141,211,199)),(1, (255,255,179)),(2, (190,186,218)),(3, (251,128,114)),(4, (128,177,211)),(5, (253,180,98)),(6, (179,222,105)),(7, (252,205,229)),(8, (217,217,217)),(9, (188,128,189)),(10, (204,235,197)),(11, (255,237,111))]),\r\n 'Accent' : OrderedDict([(0, (127,201,127)),(1, (190,174,212)),(2, (253,192,134)),(3, (255,255,153)),(4, (56,108,176)),(5, (240,2,127)),(6, (191,91,23)),(7, (102,102,102))]),\r\n 'Dark2' : OrderedDict([(0, (27,158,119)),(1, (217,95,2)),(2, (117,112,179)),(3, (231,41,138)),(4, (102,166,30)),(5, (230,171,2)),(6, (166,118,29)),(7, (102,102,102))])\r\n }.get(cname))\r\n\r\ndef Tableau(cname='tableau10'):\r\n \"\"\"tableau color\r\n tableau20 color naming from:\r\n https://gist.github.com/Nepomuk/859fef81a912a9fe425e\r\n \"\"\"\r\n return({\r\n 'tableau20':OrderedDict([('steelblue',(31,119,180)),('lightsteelblue',(174,199,232)),('darkorange',(255, 127, 14)), ('peachpuff',(255, 187, 120)), ('green',(44, 160, 44)), ('lightgreen',(152, 223, 138)),('crimson',(214, 39, 40)), ('lightcoral',(255, 152, 150)),('mediumpurple',(148, 103, 189)), ('thistle',(197, 176, 213)), ('saddlebrown',(140, 86, 75)),('rosybrown',(196, 156, 148)),('orhchid',(227, 119, 194)),('lightpink',(247, 182, 210)),('gray',(127, 127, 127)), ('lightgray',(199, 199, 199)),('olive',(188, 189, 34)),('palegoldenrod',(219, 219, 141)), ('mediumtorquoise',(23, 190, 207)),('paleturqoise',(158, 218, 229))]),\r\n 'tableau10':OrderedDict([('steelblue',(31,119,180)),('darkorange',(255,127,14)),('green',(44,160,44)),('crimson',(214,39,40)),('mediumpurple',(148,103,189)),('saddlebrown',(140,86,75)),('orhchid',(227,119,194)),('gray',(127,127,127)),('olive',(188,189,34)),('mediumtorquoise',(23,190,207))]),\r\n 'tableau10light':OrderedDict([('lightsteelblue',(174, 199, 232)),('peachpuff',(255, 187, 120)),('lightgreen',(152, 223, 138)),('lightcoral',(255, 152, 150)),('thistle',(197, 176, 213)),('rosybrown',(196, 156, 148)),('lightpink',(247, 182, 210)),('lightgray',(199, 199, 199)),('palegoldenrod',(219, 219, 141)),('paleturqoise',(158, 218, 229))]),\r\n 'tableau10medium':OrderedDict([('cerulean',(114,158,206)),('orange',(255,158,74)),('younggreen',(103,191,92)),('red',(237,102,93)),('violet',(173,139,201)),('cocoa',(168,120,110)),('pink',(237,151,202)),('silver',(162,162,162)),('witheredyellow',(205,204,93)),('aqua',(109,204,218))]),\r\n 'tableau10blind':OrderedDict([('deepskyblue4',(0, 107, 164)),('darkorange1',(255, 128, 14)),('darkgray',(171, 171, 171)),('dimgray',( 89, 89, 89)),('skyblue3',( 95, 158, 209)),('chocolate3',(200, 82, 0)),('gray',(137, 137, 137)),('slategray1',(163, 200, 236)),('sandybrown',(255, 188, 121)),('lightgray',(207, 207, 207))]),\r\n 'tableaugray5': OrderedDict([('gray1',(207,207,207)),('gray2',(165,172,175)),('gray3',(143,135,130)),('gray4',(96,99,106)),('gray5',(65,68,81))]),\r\n 'tableau10new': OrderedDict([('steelblue', (78,121,167)), ('darkorange', (242,142,43)), ('crimson', (225,87,89)), ('turqoise', (118,183,178)), ('green', (89,175,79)), ('gold', (237,201,72)), ('vilot', (176,122,161)), ('pink', (255,157,167)), ('coffee', (156,117,95)), ('grey', (186,176,172))])\r\n }.get(cname))\r\n\r\ndef MATLAB(cname='matlabnew'):\r\n \"\"\"MATLAB color scheme\"\"\"\r\n return({\r\n 'matlabnew': OrderedDict([('blue',(0, 114, 189)),('orange',(217, 83, 25)),('yellow',(237, 177, 31)), ('purple',(126, 47, 142)),('green',(119, 172, 48)),('skyblue',(77, 190, 238)),('crimson',(162, 19, 47))]),\r\n 'matlabold': OrderedDict([('black',(0,0,0)),('red',(255,0,0)),('blue',(0,0,255)), ('orange',(255,165,0)),('green',(0,127,0)), ('cyan', (0, 191,191)),('magenta', (191, 0, 191))])\r\n }).get(cname)\r\n \r\n\r\nclass ColorPalette(object):\r\n \"\"\"Color Pallete utility\"\"\"\r\n def __init__(self, palette=None):\r\n \"\"\"Initialization\r\n \"\"\"\r\n self.palette = palette\r\n if self.palette is not None:\r\n self.colors = self.get_palette(palette)\r\n\r\n def get_palette(self,palette='tableau10',Hex=True, returnOnly='code',\r\n reverseOrder=False):\r\n \"\"\"Instance method for _get_palette\"\"\"\r\n self.colors = self._get_palette(palette,Hex,returnOnly,reverseOrder)\r\n\r\n @classmethod\r\n def _get_palette(cls, palette='tableau10', returnType='hex', returnOnly='code',\r\n reverseOrder=False):\r\n \"\"\"A list of colors in RGB\r\n cname: color name. Default 'tableau20'\r\n returnOnly: ['code'|'name'], return only RGB color code or color name\r\n as a list\r\n Invert: (True / False) inverse the color order, default ordering from\r\n light to dark hues\r\n \"\"\"\r\n if palette in cls.list_palette('tableau'):\r\n colors = Tableau(palette)\r\n elif palette in cls.list_palette('colorbrewer'):\r\n # http://colorbrewer2.org/, used by web designers and R's ggplot\r\n colors = ColorBrewer(palette)\r\n elif palette in cls.list_palette('matlab'):\r\n colors = MATLAB(palette)\r\n else:# Other custom colors\r\n colors = Tableau('tableau10')\r\n if reverseOrder:\r\n colors = OrderedDict(list(reversed(list(colors.items()))))\r\n # convert to html hex strings\r\n colors = {\r\n 'dec': OrderedDict([(k,rgbint2decimal(colors[k])) \r\n for k in colors.keys()]),\r\n 'rgb': colors,\r\n }.get(returnType, OrderedDict([(k, rgb2hex(colors[k]))\r\n for k in colors.keys()])) # ddefulat hex\r\n # Return\r\n return({'code': list(colors.values()),'name': list(colors.keys())\r\n }.get(returnOnly, colors))\r\n\r\n @classmethod\r\n def list_palette(cls, scheme='tableau'):\r\n return({\r\n 'tableau': ['tableau20', 'tableau10', 'tableau10light', 'tableau10medium', 'tableau10blind', 'tableaugray5'],\r\n 'colorbrewer': ['Spectral','Pastel2','RdPu','RdYlGn','PuOr','Greens','PRGn','Accent','OrRd','YlGnBu','RdYlBu','Paired','RdGy','PuBu','Set3','BrBG','Purples','Reds','YlOrRd','Pastel1','RdBu','GnBu','BuPu','Dark2','Greys','Oranges','BuGn','Set2','PiYG','YlOrBr','PuRd','Blues','PuBuGn','YlGn','Set1'],\r\n 'matlab':['matlabnew', 'matlabold']\r\n }.get(scheme))\r\n\r\n @classmethod\r\n def list_scheme(cls):\r\n print('tableau; colorbrewer; matlab')\r\n \r\n def show_palette(self, palette='tableau'):\r\n \"\"\"class instance of _show_all_palette\"\"\"\r\n self.fig, self.axs = self._show_all_palette(palette)\r\n \r\n @classmethod\r\n def _show_all_palette(cls,palette='tableau'):\r\n \"\"\"\r\n Plot all palettes in a scheme\r\n \"\"\"\r\n import matplotlib.pyplot as plt\r\n # get the list of palettes\r\n plist = cls.list_palette(palette)\r\n npalette = len(plist)\r\n # start the figure\r\n fig, axs = plt.subplots(nrows=npalette,ncols=1)\r\n for n,pname in enumerate(plist):\r\n # get the color\r\n colors = cls._get_palette(pname,returnType='dec',returnOnly='code') \r\n cname = cls._get_palette(pname,returnOnly='name') # get color name\r\n cls.palette_ax(axs[n], colors, pname, cname)\r\n plt.show()\r\n fig.tight_layout()\r\n return(fig, axs)\r\n\r\n @staticmethod\r\n def palette_ax(ax, colors, pname=\"\", cname=[]):\r\n \"\"\"Plot one palette\"\"\"\r\n from matplotlib.colors import ListedColormap\r\n import numpy as np\r\n colors = np.array([list(c) for c in colors])\r\n gradient = np.linspace(0, 1, len(colors))\r\n gradient = np.vstack((gradient, gradient))\r\n cmap = ListedColormap(colors)\r\n ax.imshow(gradient,aspect='auto',cmap=cmap, origin='lower',\r\n interpolation='none')\r\n #plt.xticks(range(len(colors)),cname,rotation='vertical')\r\n ax.set_ylabel(pname)\r\n ax.spines['top'].set_visible(False)\r\n ax.spines['left'].set_visible(False)\r\n ax.spines['right'].set_visible(False)\r\n ax.spines['bottom'].set_visible(False)\r\n ax.tick_params(axis='both', which='both', left='off',right='off',\r\n top='off',bottom='off',\r\n labelleft='off',labelbottom='off')\r\n\r\n \r\nif __name__==\"__main__\":\r\n fig, ax = ColorPalette()._show_all_palette(palette='tableau')"
},
{
"alpha_fraction": 0.567535936832428,
"alphanum_fraction": 0.5794041752815247,
"avg_line_length": 42.473968505859375,
"blob_id": "dd75970cda73c4764a12e5cf1d3b1ca4d4e46239",
"content_id": "33975d00d8b5567327fe1602baf35a8008e120d8",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 24772,
"license_type": "no_license",
"max_line_length": 169,
"num_lines": 557,
"path": "/Plots/simple/beeswarm.py",
"repo_name": "sapphire008/Python",
"src_encoding": "UTF-8",
"text": "\"\"\"\r\nMatplotlib adapation of R's beeswarm pacakge\r\n\"\"\"\r\n\r\nimport numpy as np\r\nimport pandas as pd\r\nimport matplotlib.pyplot as plt\r\nfrom sklearn.neighbors.kde import KernelDensity\r\n\r\nfrom pdb import set_trace\r\n\r\ndef beeswarm(df, values, group=None, cluster=None, positions=None,\r\n method='swarm', corral='none', corralWidth=None,\r\n side=int(0), priority='ascending', ax=None, orientation='vertical',\r\n xlim=None, ylim=None, xlab=None, ylab=None, legendon=True,\r\n legend=None, legendtitle=None, labels=None, labelson=True,\r\n ticklabelrotation='horizontal', log=False, s=33., dpi=72.,\r\n figsize=(10.,5.), color=('k','r'), colortheme='cluster',\r\n reset_index=False, \r\n **kwargs):\r\n \"\"\"\r\n Helper functions:\r\n * connect_paired_dots\r\n * add_average_bar\r\n \r\n Inputs:\r\n * df: data frame\r\n * values: column name of the data to be plotted\r\n * group: column name of the category vector\r\n * cluster: column name of the vector that further divides each group\r\n * positions: sets the horizontal positions of the swarms.\r\n Ticks and labels are set to match the positions.\r\n If none, set positions to range(len(values))\r\n Default: None\r\n * method: how to jitter the x,y coordinates. Choose from\r\n \"swarm\", \"hex\", \"center\", \"square\"\r\n Default: swarm\r\n * corral: Method to adjust points that would be placed outside their own group region.\r\n Choose from 'none', 'gutter', 'wrap','random', 'omit'\r\n * corralWidth: Width of the \"corral\" in user coordinates. If missing, a sensible value will be chosen.\r\n * side: Direction to perform jittering\r\n 0: both directions; 1: to the right or upwards; -1: to the left or downwards.\r\n * priority: Order used to perform point layout when method is \"swarm\"; ignored otherwise.\r\n * ax: use this axis for plotting. If none supplied, make a new one\r\n Default: None.\r\n * orientation: direction of swarm plot. Defualt 'vertical'\r\n * xlim, ylim: specify x,y axes limits; specify each as a tuple\r\n * xlab, ylab: x, y axes labels\r\n * legendon: turn on legend if cluster is not None. Default True.\r\n * legend: a list of names for legend\r\n * legendtitle: title of legend\r\n * labels: tick label of categorical axis\r\n * labelson: turn on or off xlabel (Default True)\r\n * labelrotation: rotation of x label.\r\n Default: 'horizontal'\r\n * log: specify a function to apply log transformation of data, e.g. np.log10\r\n Default: False.\r\n * s: size of points in points^2 (assuming 72 points/inch).\r\n Default: 33 (corresponding roughly to 0.08 inch)\r\n * dpi: dots per point. Default 72.\r\n * figsize: figure size (width, height). Default (10.0,5.0). \r\n Need to specify before plotting for proper scatter spacing.\r\n * color: color of points. Can be:\r\n - a single string: color all points that color\r\n - a list of colors which will be cycled through\r\n - a list of colors with length identical to number of rows of df\r\n Default: ('black','red')\r\n * colortheme: mode of color theme\r\n - 'group': different colors for different group, but same for different clusters within the group\r\n - 'cluster': (Default) different colors for different clusters, but the same set of colors for each group\r\n - 'multi': different colors for different group x clusters\r\n - 'floral': as in floral pattern (e.g. wallpaper, dresses), where each point within\r\n group x cluster are cycled using color. Make sure set 'cluster=None'.\r\n * reset_index: reset the index of the df before making the plot. Default False\r\n * kwargs: whichever arguments that are relevent for plt.scatter\r\n\r\n Returns:\r\n * ax: the axis used for plotting\r\n * bs: pandas.DataFrame with columns: xorig, yorig, xnew, ynew, color\r\n\r\n Caveats:\r\n The beeswarm algorithm depends heavily on dpi, dot size and figure\r\n size. It is necessary to fine tune these three parameters so that the\r\n plot looks nicely. By default. We set dpi to be 72, dot size to be 33\r\n (coresponding roughly to 0.08 inch, use by R's beeswarm package), and\r\n figure size to be (10,5)\r\n \"\"\"\r\n # Reset df index\r\n if reset_index:\r\n df = df.reset_index(drop=True)\r\n \r\n # Create axis handle if not specified in the argument\r\n if ax is None:\r\n fig, ax = plt.subplots(nrows=1, ncols=1)\r\n # The algorithm heavily depends on dpi and figure size\r\n ax.get_figure().set_dpi(dpi)\r\n ax.get_figure().set_size_inches(figsize)\r\n\r\n # Create positions vector if not speicifed in the argument\r\n # set_trace()\r\n ngroup = len(np.unique(df[group])) if group in list(df.columns.values) else 1\r\n if positions is None:\r\n positions = np.arange(ngroup)\r\n elif len(positions) != ngroup:\r\n raise(ValueError('\"positions\" must have length equal to %d, the number of groups'%(ngroup)))\r\n print(positions)\r\n\r\n # Set the extent of axis\r\n if xlim is not None:\r\n ax.set_xlim(left=xlim[0], right=xlim[1])\r\n else:\r\n xx = max(positions) - min(positions) + 1\r\n xmin = min(positions)-0.1*xx\r\n xmax = max(positions)+0.1*xx\r\n ax.set_xlim(left=xmin, right=xmax)\r\n xlim = ax.get_xlim() if xlim is None else xlim\r\n if ylim is not None:\r\n ax.set_ylim(bottom=ylim[0], top=ylim[1])\r\n else:\r\n yy = max(df[values]) - min(df[values])\r\n ymin = min(df[values])-.05*yy\r\n ymax = max(df[values])+0.05*yy\r\n ax.set_ylim(bottom=ymin, top=ymax)\r\n ylim = ax.get_ylim() if ylim is None else ylim\r\n\r\n # Get dot size\r\n xsize, ysize = xydotsize(ax, s=s, dpi=dpi)\r\n\r\n # Create legends if not specified in the argument\r\n ncluster = len(np.unique(df[cluster])) if cluster in list(df.columns.values) else 1\r\n if ncluster==1:\r\n legend = None\r\n else:\r\n if legend is None:\r\n pass\r\n elif isinstance(legend, (list, tuple, np.ndarray)) and len(legend) != ncluster: # sanity check\r\n raise(ValueError('\"legend\" must have length equal to %d, the number of clusters'%(ncluster)))\r\n \r\n # Create group labels if not specified in the argument\r\n if labels is None:\r\n labels = stable_unique(df[group].values)\r\n\r\n # Get the color vector\r\n if isinstance(color, str): color = (color)\r\n color_spec = {\r\n 'group': colorvect([group], df, color=color),\r\n 'cluster': colorvect([cluster], df, color=color) if cluster is not None else color[0],\r\n 'multi': colorvect([group] if cluster is None else [group, cluster], df, color=color),\r\n 'floral': [color[d%len(color)] for d in df.index]\r\n }.get(colortheme)\r\n if color_spec is None:\r\n raise(ValueError('Unrecognized color theme: %s')%(colortheme))\r\n \r\n \r\n # Create a new dataframe\r\n bs = pd.DataFrame({'xorig':0, 'yorig':df[values], 'xnew':0, 'ynew':df[values], 'color':color_spec})\r\n \r\n #set_trace()\r\n\r\n # Adjust data along the grouping dimension: for now, plot vertically\r\n g_offset, g_pos, d_pos = [], [], []\r\n for n, g in enumerate(stable_unique(df[group].values)):\r\n y = df.loc[df[group]==g, values]\r\n x = np.repeat(positions[n], len(y))\r\n # jitter data\r\n if method == 'swarm':\r\n if orientation == 'vertical':\r\n g_pos.append(swarmx(x, y, xsize=xsize, ysize=ysize, side=side, priority=priority, ylog=log))\r\n g_offset.append(g_pos[n] - x)\r\n else: # horizontal\r\n g_pos = swarmy(x, y, xsize=xsize, ysize=ysize, side=side, priority=priority, xlog=log)\r\n g_offset = g_pos - y\r\n else: # other methods\r\n if orientation == 'vertical':\r\n g_pos, d_pos = gridx(x, y, xsize=xsize, ysize=ysize, dlim=ylim, method=method, side=side, log=log)\r\n g_offset = g_pos - x\r\n else: # horizontal\r\n g_pos, d_pos = gridy(x, y, xsize=xsize, ysize=ysize, dlim=xlim, method=method, side=side, log=log)\r\n g_offset = g_pos - y\r\n\r\n # check corral\r\n g_offset = _corral(positions, g_offset, size_g=xsize, ax=ax, corral=corral, corralWidth=corralWidth)\r\n\r\n # parse data frame\r\n for n, g in enumerate(stable_unique(df[group].values)):\r\n if orientation == 'vertical':\r\n bs.loc[df[group]==g, 'xorig'] = positions[n] # original position\r\n bs.loc[df[group]==g, 'xnew'] = np.array(g_pos[n]) # group offset\r\n if method != 'swarm': # data offset for non-swarm\r\n bs.loc[df[group]==g, 'ynew'] = np.array(d_pos[n])\r\n else:\r\n bs.loc[df[group]==g, 'yorig'] = positions[n] # original position\r\n bs.loc[df[group]==g, 'ynew'] = np.array(g_pos[n]) # group offset\r\n if method != 'swarm': # data offset for non-swarm\r\n bs.loc[df[group]==g, 'xnew'] = np.array(d_pos[n])\r\n\r\n # Readjust the axis again in case any dots are being cutt off\r\n if orientation == 'vertical': # adjust x axis\r\n xx = max(bs['xnew']) - min(bs['xnew'])\r\n xmin = min(bs['xnew'])-0.1*xx\r\n xmax = max(bs['xnew'])+0.1*xx\r\n ax.set_xlim(left=xmin, right=xmax)\r\n else: # horizontal, adjust y axis\r\n yy = max(bs['ynew']) - min(bs['ynew'])\r\n ymin = min(bs['ynew'])-.05*yy\r\n ymax = max(bs['ynew'])+0.05*yy\r\n ax.set_ylim(bottom=ymin, top=ymax)\r\n \r\n\r\n # Do the plot\r\n if cluster is None or ncluster==1:\r\n ax.scatter(bs['xnew'], bs['ynew'], s=s, c=bs['color'], **kwargs)\r\n else: # iterate over clusters\r\n for m, cl in enumerate(stable_unique(df[cluster].values)):\r\n ind = df[cluster]==cl\r\n ax.scatter(bs.loc[ind,'xnew'], bs.loc[ind, 'ynew'], s=s, \\\r\n c=bs.loc[ind, 'color'],\r\n label=cl if legend is None else legend[m], **kwargs)\r\n if legendon: # turn on legend\r\n ax.legend(bbox_to_anchor=(1.05, 1), loc=2, borderaxespad=0.,\\\r\n title=legendtitle)\r\n \r\n # set axis tick / group label\r\n ax.set_xticks(positions)\r\n if labels is not None and labelson:\r\n ax.set_xticklabels(labels, rotation=ticklabelrotation)\r\n elif not labelson:\r\n ax.set_xticklabels([\"\"]*len(np.unique(df[group])))\r\n\r\n # set x, y label\r\n if xlab is not None: ax.set_xlabel(xlab)\r\n if ylab is not None: ax.set_ylabel(ylab)\r\n \r\n if xlim is not None: ax.set_xlim(*xlim)\r\n if ylim is not None: ax.set_ylim(*ylim)\r\n # Return items\r\n return(ax, bs)\r\n\r\ndef xydotsize(ax, s=None, dpi=None, scale=(1.25,1.25)):\r\n \"\"\" Determine dot size in data axis.\r\n scale: helps further increasing space between dots\r\n s: font size in points\r\n \"\"\"\r\n figw, figh = ax.get_figure().get_size_inches() # figure width, height in inch\r\n dpi = float(ax.get_figure().get_dpi()) if dpi is None else float(dpi)\r\n w = (ax.get_position().xmax-ax.get_position().xmin)*figw # axis width in inch\r\n h = (ax.get_position().ymax-ax.get_position().ymin)*figh # axis height in inch\r\n xran = ax.get_xlim()[1]-ax.get_xlim()[0] # axis width in data\r\n yran = ax.get_ylim()[1]-ax.get_ylim()[0] # axis height in data\r\n if s is None:\r\n xsize=0.08*xran/w*scale[0] # xscale * proportion of xwidth in data\r\n ysize=0.08*yran/h*scale[1] # yscale * proportion of yheight in data\r\n else:\r\n xsize=np.sqrt(s)/dpi*xran/w*scale[0] # xscale * proportion of xwidth in data\r\n ysize=np.sqrt(s)/dpi*yran/h*scale[1] # yscale * proportion of yheight in data\r\n\r\n return(xsize, ysize)\r\n\r\ndef _calculateSwarm(x, dsize, gsize, side=int(0), priority='ascending'):\r\n \"\"\"Implement swarm layout algorithm\r\n gsize: group dimension size\r\n dsize: data dimension size\r\n \"\"\"\r\n # parse some inputs\r\n if x is None:\r\n return\r\n if side not in [-1,0,1]:\r\n raise(ValueError('\"side\" must be -1,0,or 1'))\r\n\r\n # make sure things are operating in float\r\n x, dsize, gsize = np.float32(x), np.float32(dsize), np.float32(gsize)\r\n # take out missing values\r\n x = x[~np.isnan(x)]\r\n #global out\r\n\r\n # initialize data frame\r\n out = pd.DataFrame({'x':x/dsize, 'y':0}, index=np.arange(len(x))) #*0.7987600831790864\r\n\r\n # Determine the order in which points will be placed\r\n out = {\r\n 'ascending': out.sort_values('x', ascending=True, axis=0),\r\n 'descending': out.sort_values('x', ascending=False, axis=0),\r\n 'none': out, # do not reorder\r\n 'random': out.reindex(np.random.permutation(out.index)), # randomly gitter\r\n 'density': out.reindex(np.argsort(-KernelDensity(kernel='gaussian',\\\r\n bandwidth=0.2).fit(out['x'][:,np.newaxis]).score_samples(\\\r\n out['x'][:,np.newaxis]))), # arrange outward from densest areas, result is simlar but not the same as R\r\n }.get(priority)\r\n\r\n # place the points: we will place once point at a time\r\n if len(out.index)>1:\r\n for ii in range(1, len(out.index)):\r\n xi = out.loc[out.index[ii], 'x']# get ii of sorted\r\n # identify previously-placed points with potential to overlap the current point\r\n isPotOverlap = (np.abs(xi - np.array(out['x']))<1.0) & (np.arange(len(out.index))<ii)\r\n #print(xi, np.where(isPotOverlap))\r\n if any(isPotOverlap):\r\n pre_x = out.loc[isPotOverlap,'x']\r\n pre_y = out.loc[isPotOverlap,'y']\r\n poty_off = np.sqrt(1-((xi - pre_x)**2)) # potential y offsets\r\n poty = {\r\n -1: np.concatenate(([0.], pre_y - poty_off)),\r\n 0: np.concatenate(([0.], pre_y + poty_off, pre_y - poty_off)),\r\n 1: np.concatenate(([0.], pre_y + poty_off))\r\n }.get(side)\r\n\r\n def checkoverlap(y): # check for overlaps afterward\r\n return(any(((xi - pre_x) ** 2 + (y - pre_y) ** 2) < 0.999))\r\n poty_bad = np.array([checkoverlap(y) for y in poty])\r\n poty[poty_bad] = np.inf\r\n out.loc[out.index[ii], 'y'] = poty[np.argmin(abs(poty))]\r\n else:\r\n out.loc[out.index[ii], 'y'] = 0\r\n\r\n out.loc[np.isnan(out['x']),'y'] = np.nan # missing x values should have missing y values\r\n return(out.sort_index()['y'] * gsize)\r\n\r\ndef swarmx(x, y, xsize, ysize, side=int(0), priority='ascending', xlog=False, ylog=False):\r\n \"\"\"jitter points horizontally\r\n xlog, ylog: must be a function, e.g. np.log10; Default False\r\n \"\"\"\r\n #print(y)\r\n if xlog:\r\n x = xlog(x)\r\n if ylog:\r\n y = ylog(y)\r\n g_pos = x + _calculateSwarm(y, dsize=ysize, gsize=xsize, side=side, priority=priority)\r\n if xlog:\r\n # get base: will not work with log1p\r\n b = np.exp(np.log(5.0)/xlog(5.0)) # 5.0, or any constants to reverse calculate base\r\n g_pos = b**g_pos\r\n return(g_pos)\r\n #return(pd.DataFrame({x=x_new, y=y}))\r\n\r\ndef swarmy(x, y, xsize, ysize, side=int(0), priority='ascending', xlog=False, ylog=False):\r\n \"\"\" jitter points vertically\r\n xlog, ylog: must be a function, e.g. np.log10; Default False\r\n \"\"\"\r\n if xlog:\r\n x = xlog(x)\r\n if ylog:\r\n y = ylog(y)\r\n g_pos = y + _calculateSwarm(x, dsize=xsize, gsize=ysize, side=side, priority=priority)\r\n if ylog:\r\n # get base: will not work with log1p\r\n b = np.exp(np.log(5.0)/ylog(5.0)) # 5.0, or any constants to reverse calculate base\r\n g_pos = b**g_pos\r\n return(g_pos)\r\n #return(pd.DataFrame({x=x, y=y_new}))\r\n\r\n\r\ndef _calculateGrid(x, dsize, gsize, dlim, method='hex', side=int(0), log=False):\r\n \"\"\"\r\n Implement the non-swarm arrangement methods\r\n dlim: data dimension limit\r\n gsize: group dimension size\r\n dsize: data dimension size\r\n log: must be a function, e.g. np.log10, otherwise/default False\r\n \"\"\"\r\n global d_index, breaks, mids, xx\r\n xx = x\r\n if method == \"hex\": dsize = dsize*np.sqrt(3.0)/2.0\r\n if log:\r\n # get base: will not work with log1p\r\n b = np.exp(np.log(5.0)/log(5.0)) # 5.0, or any constants to reverse calculate base\r\n breaks = b**np.arange(log(dlim[0]), log(dlim[1])+dsize, dsize)\r\n mids = pd.Series(b**(log(breaks[:-1]) + log(breaks[1:]))/2.0)\r\n else: # if data axis is NOT on a log scale\r\n breaks = np.arange(dlim[0], dlim[1]+dsize, dsize)\r\n mids = pd.Series((breaks[:-1] + breaks[1:]) / 2.0)\r\n if len(breaks) == 1 and np.isnan(breaks[0]):\r\n d_index, d_pos = x, x\r\n else:\r\n d_index = pd.Series(pd.cut(pd.Series(x), bins=breaks, labels=False))\r\n d_pos = d_index.apply(lambda x: mids[x])\r\n #print(d_index)\r\n # now determine positions along the group axis\r\n v_s = {}\r\n for item in stable_unique(d_index):\r\n vals = np.arange(list(d_index).count(item))\r\n v_s[item] = {\r\n 'center': {-1: vals - np.max(vals),\r\n 0: vals - np.mean(vals),\r\n 1: vals - 1.0\r\n }.get(side),\r\n 'square': {-1: vals - np.max(vals),\r\n 0: vals - np.floor(np.mean(vals)),\r\n 1: vals -1.0\r\n }.get(side),\r\n 'hex': {-1: vals - np.max(vals) - (0. if (item%2) == 1 else 0.5),\r\n 0: vals - (np.floor(np.mean(vals))+0.25 if (item%2)==1 else np.ceil(np.mean(vals))-0.25),\r\n 1: vals - (1.0 if item%2==1 else 0.5)\r\n }.get(side)\r\n }.get(method, ValueError('Unrecognized method: %s' %(method)))\r\n # raise if is an exception\r\n if isinstance(v_s[item], Exception):\r\n raise(v_s[item])\r\n x_index = unsplit(v_s, d_index)\r\n return(x_index.apply(lambda x: x*gsize), d_pos)\r\n\r\ndef gridx(x, y, xsize, ysize, dlim, method='hex', side=int(0),log=False):\r\n \"\"\" jitter points horizontally\"\"\"\r\n g_offset, d_pos = _calculateGrid(y, dsize=ysize, gsize=xsize, dlim=dlim, method=method, side=side, log=log)\r\n return(g_offset+x, d_pos) # new_x, new_y\r\n\r\ndef gridy(x, y, xsize, ysize, dlim, method='hex', side=int(0), log=False):\r\n \"\"\" jitter points vertically\"\"\"\r\n g_offset, d_pos = _calculateGrid(x, dsize=xsize, gsize=ysize, dlim=dlim, method=method, side=side, log=log)\r\n return(g_offset+y, d_pos) # new_y, new_x\r\n\r\ndef unsplit(x,f):\r\n \"\"\"\r\n same as R's unsplit function\r\n Read of the values specified in f from x to a vector\r\n\r\n Inputs:\r\n x: dictionary of value->[items]\r\n f: vector specifying values to be read off to the vector\r\n \"\"\"\r\n y = pd.DataFrame({'y':[None]*len(f)})\r\n for item in set(f):\r\n y.ix[np.array(f==item),'y'] = x[item]\r\n return(y['y'])\r\n\r\ndef is_numeric(obj):\r\n \"\"\" check if an object is numeric\"\"\"\r\n attrs = ['__add__', '__sub__', '__mul__', '__div__', '__pow__']\r\n return all(hasattr(obj, attr) for attr in attrs)\r\n\r\ndef _corral(positions, g_offset, size_g, ax, corral='none', corralWidth=None):\r\n \"\"\"Implement corral method to check for runaway points\"\"\"\r\n if corral == 'none':\r\n return(g_offset)\r\n if corralWidth is None:\r\n if len(positions)>1:\r\n corralWidth = np.min(positions[-1] - positions[-len(positions)]) - (2*size_g)\r\n else:\r\n corralWidth = 2 * (np.min(np.diff([ax.get_xlim[0]] + positions + [ax.get_ylim[1]])) - size_g)\r\n else:\r\n if not is_numeric(corralWidth):\r\n raise(ValueError('\"corralWidth\" must be a number'))\r\n if corralWidth <=0:\r\n raise(ValueError('\"corralWidth\" must be greater than 0'))\r\n halfCorralWidth = corralWidth / 2.0\r\n # calculate g_offset based on corral method\r\n g_offset = {\r\n 'gutter': [np.minimum(halfCorralWidth, np.maximum(-halfCorralWidth,zz)) for zz in g_offset],\r\n 'wrap': [(zz + halfCorralWidth) % (halfCorralWidth * 2) - halfCorralWidth for zz in g_offset],\r\n 'random': [np.random.uniform(-halfCorralWidth, halfCorralWidth, len(zz)) if (zz > halfCorralWidth).all() or (zz < halfCorralWidth).all() else zz for zz in g_offset],\r\n 'omit': [np.nan if (zz>halfCorralWidth).all() or (zz<-halfCorralWidth).all() else zz for zz in g_offset]\r\n }.get(corral, ValueError('Unrecognized corral method: %s' %(corral)))\r\n if isinstance(g_offset,Exception):\r\n raise(g_offset)\r\n else:\r\n g_offset = np.array(g_offset)\r\n\r\n return(g_offset)\r\n\r\ndef colorvect(factors, df, color=('k','r')):\r\n \"\"\"Parse the color vector.\r\n Cycle through the list of colors provided\r\n \"\"\"\r\n # create group by object\r\n groupby = df.groupby(list(factors), sort=False)\r\n # get indices of unique group\r\n return([color[c] for c in groupby.grouper.group_info[0] % len(color)])\r\n \r\ndef stable_unique(a):\r\n indices = np.unique(a, return_index=True)[1]\r\n return [a[index] for index in sorted(indices)]\r\n\r\n\r\ndef add_average_bar(ax, bs, pos='left', cap_marker_edge_width=1, label_values=None, label_values_offset=0, fmt=\"o\", color=\"k\", *args, **kwargs):\r\n \"\"\"\r\n ax: axis of the beeswarm\r\n bs: data frame returned by beeswarm\r\n pos: position of the average bars of the beeswarm.\r\n - \"left\": left of the swarm\r\n - \"right\": right of the swarm\r\n - a list of custom positions. Needs to be the same length as the number of swarms\r\n cap_marker_edge_width: default 1. This deals with seaborn.\r\n *args, **kwargs: additional arguments for ax.errorbar\r\n \"\"\"\r\n gp = bs.groupby(by='xorig', sort=False)\r\n mean0 = gp.mean()\r\n serr0 = gp.agg(lambda x: np.std(x) / np.sqrt(np.shape(x)[0]))\r\n if pos == 'left':\r\n pos0 = gp.min()['xnew'].values-1.5*gp.std()['xnew']\r\n elif pos == 'right':\r\n pos0 = gp.max()['xnew'].values-1.5*gp.std()['xnew']\r\n elif isinstance(pos, (list, tuple, np.ndarray)): # assume a list of positions\r\n pos0 = np.asarray(pos)\r\n else:\r\n raise(TypeError('Unknown type of pos'))\r\n \r\n _, caps, _ = ax.errorbar(pos0, mean0['ynew'].values, serr0['ynew'].values, fmt=fmt, color=color, *args, **kwargs)\r\n \r\n if cap_marker_edge_width is not None:\r\n for cap in caps:\r\n cap.set_markeredgewidth(cap_marker_edge_width)\r\n \r\n if label_values:\r\n if not isinstance(label_values_offset, (list, tuple, np.ndarray)):\r\n label_values_offset = [label_values_offset]*len(pos0)\r\n for p, yo, m, s in zip(pos0, label_values_offset, mean0['ynew'].values, serr0['ynew'].values):\r\n if isinstance(label_values, str) :\r\n if label_values == 'vertical':\r\n ax.text(p, m+s*1.1+yo, \"{:.1f}\\n$\\pm$\\n{:.1f}\".format(m, s), va='bottom', ha='center')\r\n elif label_values == 'mean':\r\n ax.text(p, m+s*1.1+yo, \"{:.1f}\".format(m), va='bottom', ha='center')\r\n \r\n else:\r\n ax.text(p, m+s*1.1+yo, \"{:.1f}$\\pm${:.1f}\".format(m, s), va='bottom', ha='center')\r\n \r\n return ax\r\n\r\n\r\ndef connect_paired_dots(ax, bs, zorder=0, pairs=None, *args, **kwargs):\r\n \"\"\"\r\n Connect paired beeswarms\r\n ax: axis of the beeswarm\r\n bs: data frame returned by beeswram, assuming the first half is group1 and second half is group 2\r\n pairs: positions of the dots. Default is the unique value of the xorig\r\n *args, **kwargs, additional arguements for ax.plot\r\n \"\"\"\r\n\r\n nrows = bs.shape[0] # should be a even number of rows\r\n if nrows%2 != 0:\r\n raise(ValueError('Number of rows of bs must be even!'))\r\n \r\n num_lines = int(nrows/2)\r\n \r\n if pairs is None:\r\n pairs = np.sort(bs['xorig'].unique())\r\n \r\n # Sepeating the left and right dots\r\n bs0 = bs.loc[bs['xorig']==pairs[0],:]\r\n bs1 = bs.loc[bs['xorig']==pairs[1],:]\r\n \r\n for k in range(num_lines):\r\n ax.plot(np.asarray([bs0.iloc[k]['xnew'], bs1.iloc[k]['xnew']]), \r\n np.asarray([bs0.iloc[k]['ynew'], bs1.iloc[k]['ynew']]), zorder=0, *args, **kwargs)\r\n \r\n return ax\r\n \r\n \r\n \r\n\r\nif __name__=='asdf':#'__main__':\r\n from ImportData import FigureData\r\n df = FigureData(dataFile='D:/Edward/Documents/Assignments/Scripts/Python/Plots/example/beeswarm.csv')\r\n df = df.table\r\n values = 'time_survival'\r\n group = 'ER'\r\n cluster = 'event_survival'\r\n ax, bs = beeswarm(df, values, group=group, cluster=cluster, figsize=(6,5),\r\n method='swarm', legend=('yes','no'), legendtitle='Survival',corral='gutter')\r\n"
},
{
"alpha_fraction": 0.6815476417541504,
"alphanum_fraction": 0.695105791091919,
"avg_line_length": 40.591548919677734,
"blob_id": "76dd8f852304a2ebe8437f09f081aec32837a222",
"content_id": "181762b7115d18c5f00048025e224c053259fb22",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 3024,
"license_type": "no_license",
"max_line_length": 79,
"num_lines": 71,
"path": "/PySynapse/resources/ui_designer/Scope_win.py",
"repo_name": "sapphire008/Python",
"src_encoding": "UTF-8",
"text": "# -*- coding: utf-8 -*-\r\n\r\n# Form implementation generated from reading ui file 'Scope_resizable_dock.ui'\r\n#\r\n# Created by: PyQt4 UI code generator 4.11.4\r\n#\r\n# WARNING! All changes made in this file will be lost!\r\n\r\nfrom PyQt4 import QtCore, QtGui\r\n\r\ntry:\r\n _fromUtf8 = QtCore.QString.fromUtf8\r\nexcept AttributeError:\r\n def _fromUtf8(s):\r\n return s\r\n\r\ntry:\r\n _encoding = QtGui.QApplication.UnicodeUTF8\r\n def _translate(context, text, disambig):\r\n return QtGui.QApplication.translate(context, text, disambig, _encoding)\r\nexcept AttributeError:\r\n def _translate(context, text, disambig):\r\n return QtGui.QApplication.translate(context, text, disambig)\r\n\r\nclass ScopeWindow(QtGui.QMainWindow):\r\n def __init__(self, parent=None, maxepisodes=10, layout=None):\r\n super(ScopeWindow, self).__init__(parent)\r\n self.setupUi(self)\r\n\r\n def setupUi(self, MainWindow):\r\n MainWindow.setObjectName(_fromUtf8(\"MainWindow\"))\r\n MainWindow.resize(988, 549)\r\n self.centralwidget = QtGui.QWidget(MainWindow)\r\n self.centralwidget.setObjectName(_fromUtf8(\"centralwidget\"))\r\n self.horizontalLayout = QtGui.QHBoxLayout(self.centralwidget)\r\n self.horizontalLayout.setObjectName(_fromUtf8(\"horizontalLayout\"))\r\n self.graphicsView = QtGui.QGraphicsView(self.centralwidget)\r\n self.graphicsView.setObjectName(_fromUtf8(\"graphicsView\"))\r\n self.horizontalLayout.addWidget(self.graphicsView)\r\n MainWindow.setCentralWidget(self.centralwidget)\r\n self.menubar = QtGui.QMenuBar(MainWindow)\r\n self.menubar.setGeometry(QtCore.QRect(0, 0, 988, 21))\r\n self.menubar.setObjectName(_fromUtf8(\"menubar\"))\r\n MainWindow.setMenuBar(self.menubar)\r\n self.statusbar = QtGui.QStatusBar(MainWindow)\r\n self.statusbar.setObjectName(_fromUtf8(\"statusbar\"))\r\n MainWindow.setStatusBar(self.statusbar)\r\n self.dockWidget = QtGui.QDockWidget(MainWindow)\r\n self.dockWidget.setObjectName(_fromUtf8(\"dockWidget\"))\r\n self.dockWidgetContents = QtGui.QWidget()\r\n self.dockWidgetContents.setObjectName(_fromUtf8(\"dockWidgetContents\"))\r\n self.horizontalLayout_2 = QtGui.QHBoxLayout(self.dockWidgetContents)\r\n self.horizontalLayout_2.setObjectName(_fromUtf8(\"horizontalLayout_2\"))\r\n self.listView = QtGui.QListView(self.dockWidgetContents)\r\n self.listView.setObjectName(_fromUtf8(\"listView\"))\r\n self.horizontalLayout_2.addWidget(self.listView)\r\n self.dockWidget.setWidget(self.dockWidgetContents)\r\n MainWindow.addDockWidget(QtCore.Qt.DockWidgetArea(2), self.dockWidget)\r\n\r\n self.retranslateUi(MainWindow)\r\n QtCore.QMetaObject.connectSlotsByName(MainWindow)\r\n\r\n def retranslateUi(self, MainWindow):\r\n MainWindow.setWindowTitle(_translate(\"MainWindow\", \"MainWindow\", None))\r\n\r\nimport sys\r\nif __name__ == '__main__':\r\n app = QtGui.QApplication(sys.argv)\r\n w = ScopeWindow()\r\n w.show()\r\n sys.exit(app.exec_())\r\n"
},
{
"alpha_fraction": 0.6719876527786255,
"alphanum_fraction": 0.6802265644073486,
"avg_line_length": 37.51020431518555,
"blob_id": "4ea1f984fb0bd9d9951e23196aff5989a73530ee",
"content_id": "b458d69b03a37e358c969efed63c1e7f915d5b21",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 1942,
"license_type": "no_license",
"max_line_length": 98,
"num_lines": 49,
"path": "/Plots/__init__.py",
"repo_name": "sapphire008/Python",
"src_encoding": "UTF-8",
"text": "# -*- coding: utf-8 -*-\r\n\"\"\"\r\nCreated on Sat Jun 27 19:02:40 2015\r\n\r\n@author: Edward\r\n\"\"\"\r\n\r\n\"\"\"Set up the Plot package\"\"\"\r\n\r\n#import os, glob, shutil\r\n#import matplotlib\r\n# matplotlib.use('Agg') # use 'Agg' backend\r\n# matplotlib.rc('font',**{'family':'sans-serif','sans-serif':['Helvetica']})\r\n\r\n#__all__ = ['FigureData', 'PublicationFigures']\r\n\r\n## First check under ./fonts/ttf that Helvectica family fonts exist\r\n#resourcepath = os.path.abspath(os.path.join(os.path.dirname(__file__), 'resource/Helvetica.ttf'))\r\n#rcpath = matplotlib.matplotlib_fname() # get rc file path\r\n#fontpath = os.path.abspath(os.path.join(os.path.dirname(rcpath), 'fonts/ttf/'))\r\n#targetpath = glob.glob(os.path.abspath(os.path.join(fontpath, 'Helvetica*.ttf')))\r\n#try: # try to copy the font to the ttf folder, if have write permission\r\n# if not targetpath: # if font not found, copy from resource folder\r\n# targetpath = os.path.abspath(os.path.join(fontpath, 'Helvetica.ttf'))\r\n# shutil.copy(resourcepath, targetpath) # may fail\r\n# print('Helvetica.ttf is copied to %s' %targetpath)\r\n# # Set up matplotlib properties\r\n# matplotlib.rcParams['font.family'] = 'sans-serif'\r\n# matplotlib.rcParams['font.sans-serif'] = ['Helvetica']\r\n# matplotlib.rcParams['font.size'] = 12.0\r\n#except: # use alternative ways to set up the font\r\n# # use a different backend, as the default backend may not work\r\n# print('Do not have permission to copy the file. Use alternative solution')\r\n# \r\n# prop = matplotlib.font_manager.FontProperties(fname=resourcepath) \r\n\r\n\r\n# get supported file type\r\n# import matplotlib.pyplot as plt\r\n# fig = plt.figure()\r\n# print(fig.canvas.get_supported_filetypes())\r\n\r\n# Get backend in current use\r\n# matplotlib.get_backend()\r\n\r\n# Get a list of available backends\r\n# matplotlib.rcsetup.interactive_bk\r\n# matplotlib.rcsetup.non_interactive_bk\r\n# matplotlib.rcsetup.all_backends\r\n\r\n\r\n\r\n"
},
{
"alpha_fraction": 0.5689307451248169,
"alphanum_fraction": 0.6119704246520996,
"avg_line_length": 27.780000686645508,
"blob_id": "74156b1554d38f2c0e31fdca112d936bcd3ad6ef",
"content_id": "4bc71abbde22201647ddba486e8be95da82dd912",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 1487,
"license_type": "no_license",
"max_line_length": 91,
"num_lines": 50,
"path": "/Spikes/GA_spk_sort.py",
"repo_name": "sapphire008/Python",
"src_encoding": "UTF-8",
"text": "# -*- coding: utf-8 -*-\r\n\"\"\"\r\nCreated on Sun Dec 06 09:02:29 2015\r\n\r\nGA spike detection\r\n\r\nZarifia et al., 2015\r\nA new evolutionary approach for neural spike detection based on genetic algorithm\r\n\r\n@author: Edward\r\n\"\"\"\r\n\r\nimport sys\r\nsys.path.append(\"D:/Edward/Documents/Assignments/Scripts/Python/Plots\")\r\nfrom ImportData import NeuroData\r\nsys.path.append(\"D:/Edward/Documents/Assignments/Scripts/Python/Spikes\")\r\nfrom spk_util import *\r\nsys.path.append(\"D:/Edward/Documents/Assignments/Scripts/Python/generic\")\r\nfrom MATLAB import *\r\nfrom matplotlib import pyplot as plt\r\n\r\nimport scipy.signal as sg\r\nimport scipy.optimize as op\r\n\r\ndef GA_spk_detect(Vs, ts):\r\n Vs = np.concatenate(([Vs[0]], Vs, [Vs[-1]]))\r\n psi = Vs[1:-1]**2 - Vs[2:] * Vs[:-2]\r\n Vs = Vs[1:-1] # recover original time series\r\n \r\n def find_C(psi, C):\r\n thresh = C * np.mean(psi)\r\n ind, pks = findpeaks(psi, mph=thresh, mpd=20)\r\n # Calculate SNR\r\n # Debug plot\r\n fig, ax = plt.subplots(nrows=1, ncols=1)\r\n ax.plot(Vs)\r\n ax.plot(ind, 150*np.ones(len(ind)), 'o')\r\n \r\n \r\n \r\n \r\n \r\nif __name__ == '__main__':\r\n datadir = 'D:/Data/Traces/2015/11.November/Data 20 Nov 2015/Slice C.20Nov15.S1.E10.dat'\r\n # Load data\r\n zData = NeuroData(datadir, old=True)\r\n ts = zData.Protocol.msPerPoint\r\n Vs = zData.Current['A']\r\n #Vs = spk_filter(Vs, ts, Wn=[300., 3000.], btype='bandpass')\r\n Vs = spk_window(Vs, ts, [0,5000])"
},
{
"alpha_fraction": 0.5505780577659607,
"alphanum_fraction": 0.586705207824707,
"avg_line_length": 30.272727966308594,
"blob_id": "05d9ed197990852054103d56e23470e74952525b",
"content_id": "e405782d9bcee064c623c2987915d78dcbafd8ae",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 692,
"license_type": "no_license",
"max_line_length": 97,
"num_lines": 22,
"path": "/Spikes/spikedetekt2/experimental/testlook.py",
"repo_name": "sapphire008/Python",
"src_encoding": "UTF-8",
"text": "\"\"\"Unit tests for the viewdata module.\"\"\"\n\n# -----------------------------------------------------------------------------\n# Imports\n# -----------------------------------------------------------------------------\nimport os\nimport sys\nimport time\nimport tempfile\n\nimport numpy as np\n\nfrom spikedetekt2.dataio import *\nfrom klustaviewa.views.viewdata import *\nfrom klustaviewa.views.tests.utils import show_view\nfrom klustaviewa.views import WaveformView, FeatureView\n\n\nwith Experiment('n6mab041109_60sec_n6mab031109_MKKdistfloat_25_regular100_1', dir='data') as exp:\n chgrp = exp.channel_groups[0]\n data = get_waveformview_data(exp, clusters=[0])\n show_view(WaveformView, **data)\n "
},
{
"alpha_fraction": 0.5943499803543091,
"alphanum_fraction": 0.6143254041671753,
"avg_line_length": 44.44744873046875,
"blob_id": "dc0c3c049b497e80057698d53e8f0ff9e8fdc7b2",
"content_id": "250bfae709d2954d4c51e05e46874244489cbd0b",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 15469,
"license_type": "no_license",
"max_line_length": 138,
"num_lines": 333,
"path": "/PySynapse/app/Annotations.py",
"repo_name": "sapphire008/Python",
"src_encoding": "UTF-8",
"text": "# -*- coding: utf-8 -*-\r\n\"\"\"\r\nCreated on Fri Dec 16 19:40:31 2016\r\n\r\nAdding annotations to display as well as export\r\n\r\n@author: Edward\r\n\"\"\"\r\n\r\nimport sys\r\nimport os\r\nimport fileinput\r\nfrom PyQt5 import QtCore, QtGui, QtWidgets\r\nfrom app.ColorComboBox import ColorDropDownCombobox\r\n\r\n__location__ = os.path.realpath(os.path.join(os.getcwd(), os.path.dirname(__file__)))\r\n\r\nsys.path.append(os.path.join(__location__, '..')) # for debug only\r\nfrom util.MATLAB import *\r\n\r\nfrom collections import OrderedDict\r\n\r\n\r\ntry:\r\n _fromUtf8 = QtCore.QString.fromUtf8\r\nexcept AttributeError:\r\n def _fromUtf8(s):\r\n return s\r\n\r\ntry:\r\n _encoding = QtGui.QApplication.UnicodeUTF8\r\n def _translate(context, text, disambig):\r\n return QtCore.QCoreApplication.translate(context, text, disambig, _encoding)\r\nexcept AttributeError:\r\n def _translate(context, text, disambig):\r\n return QtCore.QCoreApplication.translate(context, text, disambig)\r\n\r\nclass AnnotationSetting(QtWidgets.QDialog):\r\n # Class variable\r\n ann_obj = ['box', # [x1, y1, x2, y2, linewidth, linestyle, color]\r\n 'line', # [x1, y1, x2, y2, linewidth, linestyle, color]\r\n 'circle', # [center_x, center_y, a, b, rotation, linewidth, linestyle, color]\r\n 'arrow', # [x, y, x_arrow, y_arrow, linewidth, linestyle, color]\r\n 'symbol', # ['symbol', x, y, markersize, color]\r\n 'ttl'] # TTL triggered stimulus [bool_convert_pulse_to_step]\r\n def __init__(self, parent=None, artist=None):\r\n super(AnnotationSetting, self).__init__(parent)\r\n self.setWindowIcon(QtGui.QIcon('resources/icons/setting.png'))\r\n self.isclosed = False\r\n self.parent = parent\r\n if artist is None:\r\n self.initialTypeSelectionDialog() # type of annotation setting to make\r\n else:\r\n self.type = artist['type']\r\n self.artist = dict() if artist is None else artist\r\n self.settingDict = dict()\r\n self.setLayout(QtWidgets.QVBoxLayout())\r\n\r\n # Call the corresponding setting windows to get annotation object properties\r\n if self.type == 'box':\r\n self.setWindowTitle(\"Box Annotations\")\r\n widgetFrame = self.boxSettings()\r\n elif self.type == 'line':\r\n self.setWindowTitle('Line Annotation')\r\n widgetFrame = self.lineSettings()\r\n elif self.type == 'ttl':\r\n self.setWindowTitle(\"TTL Annotation\")\r\n widgetFrame = self.ttlSettings()\r\n else:\r\n raise(NotImplementedError(\"'{}' annotation object has not been implemented yet\".format(self.type)))\r\n \r\n # buttons for saving the settings and exiting the settings window\r\n OK_button = QtWidgets.QPushButton('OK')\r\n OK_button.setDefault(True)\r\n OK_button.clicked.connect(lambda: self.updateSettings(closeWidget=True))\r\n Cancel_button = QtWidgets.QPushButton('Cancel')\r\n Cancel_button.clicked.connect(self.close)\r\n self.buttonGroup = QtWidgets.QGroupBox()\r\n self.buttonGroup.setLayout(QtWidgets.QHBoxLayout())\r\n self.buttonGroup.layout().addWidget(OK_button, 0)\r\n self.buttonGroup.layout().addWidget(Cancel_button, 0)\r\n\r\n self.layout().addWidget(widgetFrame)\r\n self.layout().addWidget(self.buttonGroup)\r\n\r\n def initialTypeSelectionDialog(self, current_index=0):\r\n selected_item, ok = QtWidgets.QInputDialog.getItem(self, \"Select annotation object type\",\r\n \"annotation objects\", self.ann_obj, current_index, False)\r\n self.type = selected_item\r\n\r\n def boxSettings(self):\r\n \"\"\"return a dictionary of the box annotation artist\"\"\"\r\n widgetFrame = QtWidgets.QFrame()\r\n widgetFrame.setLayout(QtWidgets.QGridLayout())\r\n sizePolicy = QtWidgets.QSizePolicy(QtWidgets.QSizePolicy.Preferred, QtWidgets.QSizePolicy.Preferred)\r\n sizePolicy.setHorizontalStretch(0)\r\n widgetFrame.setSizePolicy(sizePolicy)\r\n widgetFrame.setObjectName(_fromUtf8(\"boxSettingsWidgetFrame\"))\r\n\r\n # Settings for a box\r\n x0_label = QtWidgets.QLabel('X0')\r\n x0_text = QtWidgets.QLineEdit(self.parseArtist(field='x0', default='0', return_type=str))\r\n y0_label = QtWidgets.QLabel('Y0')\r\n y0_text = QtWidgets.QLineEdit(self.parseArtist(field='y0', default='0', return_type=str))\r\n w_label = QtWidgets.QLabel('Width')\r\n w_text = QtWidgets.QLineEdit(self.parseArtist(field='width', default='500', return_type=str))\r\n h_label = QtWidgets.QLabel('Height')\r\n h_text = QtWidgets.QLineEdit(self.parseArtist(field='height', default='10', return_type=str))\r\n line_checkbox = QtWidgets.QCheckBox('Line')\r\n line_checkbox.setCheckState(self.parseArtist(field='line', default=2, return_type=bool))\r\n lw_label = QtWidgets.QLabel('Line Width')\r\n lw_text = QtWidgets.QLineEdit(self.parseArtist(field='linewidth', default='0.5669291338582677', return_type=str))\r\n ls_label = QtWidgets.QLabel('Line Style')\r\n ls_text = QtWidgets.QLineEdit(self.parseArtist(field='linestyle', default='-', return_type=str))\r\n ls_text.setToolTip('\"-\" (default), \"--\", \"-.\", \":\"')\r\n lc_label = QtWidgets.QLabel('Line Color')\r\n lc_text = ColorDropDownCombobox(default=self.parseArtist(field='linecolor', default='k', return_type=str))\r\n #lc_text = QtWidgets.QLineEdit(self.parseArtist(field='linecolor', default='k', return_type=str)) # single letters or hex string\r\n lc_text.setToolTip('Single letter or hex value of the color')\r\n fill_checkbox = QtWidgets.QCheckBox('Fill')\r\n fill_checkbox.setCheckState(self.parseArtist(field='fill', default=0, return_type=bool))\r\n fc_label = QtWidgets.QLabel('Fill Color')\r\n fc_text = ColorDropDownCombobox(default=self.parseArtist(field='fillcolor', default='#1f77b4', return_type=str))\r\n # fc_text = QtWidgets.QLineEdit(self.parseArtist(field='fillcolor', default='#1f77b4', return_type=str))\r\n fc_text.setToolTip('Single letter or hex value of the color')\r\n fa_label = QtWidgets.QLabel('Fill Alpha')\r\n fa_text = QtWidgets.QLineEdit(self.parseArtist(field='fillalpha', default='100', return_type=str))\r\n fa_suffix_label = QtWidgets.QLabel('%')\r\n\r\n # Make a dictionary of the values\r\n self.settingDict['x0'] = x0_text\r\n self.settingDict['y0'] = y0_text\r\n self.settingDict['width'] = w_text\r\n self.settingDict['height'] = h_text\r\n self.settingDict['line'] = line_checkbox\r\n self.settingDict['linewidth'] = lw_text\r\n self.settingDict['linestyle'] = ls_text\r\n self.settingDict['linecolor'] = lc_text\r\n self.settingDict['fill'] = fill_checkbox\r\n self.settingDict['fillcolor'] = fc_text\r\n self.settingDict['fillalpha'] = fa_text\r\n\r\n # Add the widgets to the window\r\n widgetFrame.layout().addWidget(x0_label, 0, 0, 1, 1)\r\n widgetFrame.layout().addWidget(x0_text, 0, 1, 1, 1)\r\n widgetFrame.layout().addWidget(y0_label, 0, 2, 1, 1)\r\n widgetFrame.layout().addWidget(y0_text, 0, 3, 1, 1)\r\n widgetFrame.layout().addWidget(w_label, 1, 0, 1, 1)\r\n widgetFrame.layout().addWidget(w_text, 1, 1, 1, 1)\r\n widgetFrame.layout().addWidget(h_label, 1, 2, 1, 1)\r\n widgetFrame.layout().addWidget(h_text, 1, 3, 1, 1)\r\n widgetFrame.layout().addWidget(line_checkbox, 2, 0, 1, 2)\r\n widgetFrame.layout().addWidget(lw_label, 2, 2, 1, 1)\r\n widgetFrame.layout().addWidget(lw_text, 2, 3, 1, 1)\r\n widgetFrame.layout().addWidget(ls_label, 3, 0, 1, 1)\r\n widgetFrame.layout().addWidget(ls_text, 3, 1, 1, 1)\r\n widgetFrame.layout().addWidget(lc_label, 3, 2, 1, 1)\r\n widgetFrame.layout().addWidget(lc_text, 3, 3, 1, 1)\r\n\r\n widgetFrame.layout().addWidget(fill_checkbox, 4, 0, 1, 2)\r\n widgetFrame.layout().addWidget(fc_label, 4, 2, 1, 1)\r\n widgetFrame.layout().addWidget(fc_text, 4, 3, 1, 1)\r\n widgetFrame.layout().addWidget(fa_label, 5, 0, 1, 1)\r\n widgetFrame.layout().addWidget(fa_text, 5, 1, 1, 1)\r\n widgetFrame.layout().addWidget(fa_suffix_label, 5, 2, 1, 1)\r\n\r\n return widgetFrame\r\n\r\n def lineSettings(self):\r\n widgetFrame = QtWidgets.QFrame()\r\n widgetFrame.setLayout(QtWidgets.QGridLayout())\r\n sizePolicy = QtWidgets.QSizePolicy(QtWidgets.QSizePolicy.Preferred, QtWidgets.QSizePolicy.Preferred)\r\n sizePolicy.setHorizontalStretch(0)\r\n widgetFrame.setSizePolicy(sizePolicy)\r\n widgetFrame.setObjectName(_fromUtf8(\"lineSettingsWidgetFrame\"))\r\n\r\n # Settings for a line\r\n x0_label = QtWidgets.QLabel('X0')\r\n x0_text = QtWidgets.QLineEdit(self.parseArtist(field='x0', default='0', return_type=str))\r\n y0_label = QtWidgets.QLabel('Y0')\r\n y0_text = QtWidgets.QLineEdit(self.parseArtist(field='y0', default='0', return_type=str))\r\n x1_label = QtWidgets.QLabel('X1')\r\n x1_text = QtWidgets.QLineEdit(self.parseArtist(field='x1', default='1000', return_type=str))\r\n y1_label = QtWidgets.QLabel('Y1')\r\n y1_text = QtWidgets.QLineEdit(self.parseArtist(field='y1', default='0', return_type=str))\r\n\r\n lw_label = QtWidgets.QLabel('Line Width')\r\n lw_text = QtWidgets.QLineEdit(self.parseArtist(field='linewidth', default='0.5669291338582677', return_type=str))\r\n ls_label = QtWidgets.QLabel('Line Style')\r\n ls_text = QtWidgets.QLineEdit(self.parseArtist(field='linestyle', default='--', return_type=str))\r\n ls_text.setToolTip('\"-\" (default), \"--\", \"-.\", \":\"')\r\n lc_label = QtWidgets.QLabel('Line Color')\r\n lc_text = ColorDropDownCombobox(default=self.parseArtist(field='linecolor', default='k', return_type=str))\r\n # lc_text = QtWidgets.QLineEdit(self.parseArtist(field='linecolor', default='k', return_type=str)) # single letters or hex string\r\n lc_text.setToolTip('Single letter or hex value of the color')\r\n\r\n # make a dictionary of hte vlaues\r\n self.settingDict['x0'] = x0_text\r\n self.settingDict['y0'] = y0_text\r\n self.settingDict['x1'] = x1_text\r\n self.settingDict['y1'] = y1_text\r\n self.settingDict['linewidth'] = lw_text\r\n self.settingDict['linestyle'] = ls_text\r\n self.settingDict['linecolor'] = lc_text\r\n\r\n # Add the widget to the window\r\n widgetFrame.layout().addWidget(x0_label, 0, 0, 1, 1)\r\n widgetFrame.layout().addWidget(x0_text, 0, 1, 1, 1)\r\n widgetFrame.layout().addWidget(y0_label, 0, 2, 1, 1)\r\n widgetFrame.layout().addWidget(y0_text, 0, 3, 1, 1)\r\n widgetFrame.layout().addWidget(x1_label, 1, 0, 1, 1)\r\n widgetFrame.layout().addWidget(x1_text, 1, 1, 1, 1)\r\n widgetFrame.layout().addWidget(y1_label, 1, 2, 1, 1)\r\n widgetFrame.layout().addWidget(y1_text, 1, 3, 1, 1)\r\n widgetFrame.layout().addWidget(lw_label, 2, 2, 1, 1)\r\n widgetFrame.layout().addWidget(lw_text, 2, 3, 1, 1)\r\n widgetFrame.layout().addWidget(ls_label, 3, 0, 1, 1)\r\n widgetFrame.layout().addWidget(ls_text, 3, 1, 1, 1)\r\n widgetFrame.layout().addWidget(lc_label, 3, 2, 1, 1)\r\n widgetFrame.layout().addWidget(lc_text, 3, 3, 1, 1)\r\n\r\n return widgetFrame\r\n\r\n def ttlSettings(self):\r\n \"\"\"return a dictionary of the TTL annotation artist\"\"\"\r\n widgetFrame = QtWidgets.QFrame()\r\n widgetFrame.setLayout(QtWidgets.QGridLayout())\r\n sizePolicy = QtWidgets.QSizePolicy(QtWidgets.QSizePolicy.Preferred, QtWidgets.QSizePolicy.Preferred)\r\n sizePolicy.setHorizontalStretch(0)\r\n widgetFrame.setSizePolicy(sizePolicy)\r\n widgetFrame.setObjectName(_fromUtf8(\"ttlSettingsWidgetFrame\"))\r\n\r\n # Settings for TTL\r\n pulse2step_checkbox = QtWidgets.QCheckBox('Convert Pulse to Step')\r\n pulse2step_checkbox.setCheckState(2)\r\n pulse2step_checkbox.setToolTip(\"Draw a block of short pulses as a continuous step\")\r\n\r\n realpulse_checkbox = QtWidgets.QCheckBox('Draw Real Pulse Width')\r\n realpulse_checkbox.setCheckState(2)\r\n realpulse_checkbox.setToolTip('If unchecked, pulses will be represented as a vertial line only')\r\n\r\n # Make a dictionary of the values\r\n self.settingDict['bool_pulse2step'] = pulse2step_checkbox\r\n self.settingDict['bool_realpulse'] = realpulse_checkbox\r\n\r\n # Add the widgets to the window\r\n widgetFrame.layout().addWidget(pulse2step_checkbox, 0, 0, 1, 1)\r\n widgetFrame.layout().addWidget(realpulse_checkbox, 1, 0, 1, 1)\r\n\r\n return widgetFrame\r\n\r\n\r\n def checkSettingUpdates(self):\r\n \"\"\"Sanity check fields that cannot be empty input\"\"\"\r\n if self.type == 'box':\r\n keys = ['x0', 'y0', 'width', 'height', 'linewidth', 'linestyle', 'linecolor', 'fillcolor', 'fillalpha']\r\n elif self.type == 'line':\r\n keys = ['x0', 'y0', 'x1', 'y1', 'linewidth', 'linestyle', 'linecolor']\r\n elif self.type == 'circle':\r\n return True\r\n elif self.type == 'arrow':\r\n return True\r\n elif self.type == 'symbol':\r\n return True\r\n elif self.type == 'ttl':\r\n return True\r\n else:\r\n return True\r\n\r\n for k in keys:\r\n if self.artist[k] == '':\r\n msg = QtWidgets.QMessageBox()\r\n msg.setWindowTitle(\"Error\")\r\n msg.setText(\"'{}' argument cannot be empty when drawing a '{}'\".format(k, self.type))\r\n msg.exec_()\r\n return False\r\n return True\r\n\r\n def updateSettings(self, closeWidget=False):\r\n for k, v in self.settingDict.items():\r\n if isinstance(v, QtWidgets.QComboBox):\r\n val = v.currentText()\r\n elif isinstance(v, QtWidgets.QLineEdit):\r\n val = v.text()\r\n elif isinstance(v, QtWidgets.QCheckBox):\r\n val = True if v.checkState() > 0 else False\r\n elif isinstance(v, QtWidgets.QSpinBox):\r\n val = v.value()\r\n else:\r\n raise (TypeError('Unrecognized type of setting item'))\r\n\r\n self.artist[k] = val\r\n\r\n # sanity check\r\n state = self.checkSettingUpdates()\r\n if not state:\r\n return\r\n\r\n if closeWidget:\r\n self.accept()\r\n\r\n def parseArtist(self, field, default, return_type=None):\r\n if field in self.artist.keys():\r\n val = self.artist[field]\r\n else:\r\n val = default\r\n\r\n if return_type is not None:\r\n if return_type is bool:\r\n val = bool(val)*2 # for boolean check state\r\n else:\r\n val = return_type(val)\r\n\r\n return val\r\n\r\n\r\nif __name__ == '__main__':\r\n # iniPath = 'D:/Edward/Documents/Assignments/Scripts/Python/PySynapse/resources/config.ini'\r\n # with fileinput.input(iniPath, inplace=True, backup='.bak') as f:\r\n # for line in f:\r\n # if line[0] == '#':\r\n # print('#asdf.mat')\r\n # else:\r\n # print(line, end='')\r\n\r\n app = QtWidgets.QApplication(sys.argv)\r\n ex = AnnotationSetting()\r\n ex.show()\r\n if ex.exec_():\r\n print(ex.artist)\r\n # fff = app.exec_()\r\n # print(ex.artists)\r\n # sys.exit(fff)\r\n\r\n"
},
{
"alpha_fraction": 0.6090124249458313,
"alphanum_fraction": 0.6117107272148132,
"avg_line_length": 32.63551330566406,
"blob_id": "b440356af39625c491d185f90a6baa50c48916be",
"content_id": "4022af4c2d97e8b11c9a606f56afe3c4abcca381",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 3706,
"license_type": "no_license",
"max_line_length": 123,
"num_lines": 107,
"path": "/PySynapse/archive/Multi_instance_connection.py",
"repo_name": "sapphire008/Python",
"src_encoding": "UTF-8",
"text": "\"\"\"\r\n//this set it ready to receive a message to a specific chanell, once received, album_slot() will run\r\n QDBusConnection::sessionBus().connect(QString(),QString(), \"open.album\", \"MY_message\", this, SLOT(album_slot(QString)));\r\n//these 3 lines create a signal and send it to DBus\r\nQDBusMessage msg = QDBusMessage::createSignal(\"/\", \"open.album\", \"MY_message\");\r\nmsg=\"text\"\r\nQDBusConnection::sessionBus().send(msg);\r\n...\r\n...\r\nvoid album_slot(const QString &text){\r\nif(text==\"text\") etc\r\n}\r\n\"\"\"\r\n\r\n#import std\r\nimport os, sys\r\n\r\n# import stuff for ipc\r\nimport getpass, pickle\r\n\r\n# Import Qt modules\r\nfrom PyQt4.QtGui import QApplication\r\nfrom PyQt4.QtCore import QSharedMemory, QIODevice, SIGNAL\r\nfrom PyQt4.QtNetwork import QLocalServer, QLocalSocket\r\n\r\nclass SingletonApp(QApplication):\r\n\r\n timeout = 1000\r\n\r\n def __init__(self, argv, application_id=None):\r\n QApplication.__init__(self, argv)\r\n\r\n self.socket_filename = unicode(os.path.expanduser(\"~/.ipc_%s\"\r\n % self.generate_ipc_id()) )\r\n self.shared_mem = QSharedMemory()\r\n self.shared_mem.setKey(self.socket_filename)\r\n\r\n if self.shared_mem.attach():\r\n self.is_running = True\r\n return\r\n\r\n self.is_running = False\r\n if not self.shared_mem.create(1):\r\n print >>sys.stderr, \"Unable to create single instance\"\r\n return\r\n # start local server\r\n self.server = QLocalServer(self)\r\n # connect signal for incoming connections\r\n self.connect(self.server, SIGNAL(\"newConnection()\"), self.receive_message)\r\n # if socket file exists, delete it\r\n if os.path.exists(self.socket_filename):\r\n os.remove(self.socket_filename)\r\n # listen\r\n self.server.listen(self.socket_filename)\r\n\r\n def __del__(self):\r\n self.shared_mem.detach()\r\n if not self.is_running:\r\n if os.path.exists(self.socket_filename):\r\n os.remove(self.socket_filename)\r\n\r\n\r\n def generate_ipc_id(self, channel=None):\r\n if channel is None:\r\n channel = os.path.basename(sys.argv[0])\r\n return \"%s_%s\" % (channel, getpass.getuser())\r\n\r\n def send_message(self, message):\r\n if not self.is_running:\r\n raise Exception(\"Client cannot connect to IPC server. Not running.\")\r\n socket = QLocalSocket(self)\r\n socket.connectToServer(self.socket_filename, QIODevice.WriteOnly)\r\n if not socket.waitForConnected(self.timeout):\r\n raise Exception(str(socket.errorString()))\r\n socket.write(pickle.dumps(message))\r\n if not socket.waitForBytesWritten(self.timeout):\r\n raise Exception(str(socket.errorString()))\r\n socket.disconnectFromServer()\r\n\r\n def receive_message(self):\r\n socket = self.server.nextPendingConnection()\r\n if not socket.waitForReadyRead(self.timeout):\r\n print >>sys.stderr, socket.errorString()\r\n return\r\n byte_array = socket.readAll()\r\n self.handle_new_message(pickle.loads(str(byte_array)))\r\n\r\n def handle_new_message(self, message):\r\n print \"Received:\", message\r\n\r\n# Create a class for our main window\r\nclass Main(QtGui.QMainWindow):\r\n def __init__(self):\r\n QtGui.QMainWindow.__init__(self)\r\n # This is always the same\r\n self.ui=Ui_MainWindow()\r\n self.ui.setupUi(self)\r\n\r\nif __name__ == \"__main__\":\r\n app = SingletonApp(sys.argv)\r\n if app.is_running:\r\n # send arguments to running instance\r\n app.send_message(sys.argv)\r\n else:\r\n MyApp = Main()\r\n MyApp.show()\r\n sys.exit(app.exec_())\r\n"
},
{
"alpha_fraction": 0.7013404965400696,
"alphanum_fraction": 0.7040214538574219,
"avg_line_length": 32.30356979370117,
"blob_id": "db4dc8e2e9903ff7f9f15daf5562218c1ce9e1b2",
"content_id": "5149a71df094c351b60e10f8d988894553ab4092",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 1865,
"license_type": "no_license",
"max_line_length": 94,
"num_lines": 56,
"path": "/Spikes/spikedetekt2/experimental/kkpython.py",
"repo_name": "sapphire008/Python",
"src_encoding": "UTF-8",
"text": "# Code example showing how to create temporary .fet and .fmask files from\n# files in the new format.\n#\n# Arguments: \n# * channel_group: the channel group index to process\n# * filename: the filename of the KWIK file\n# * params: a dictionary with all KK parameters\n\nimport os\nimport shutil\nimport tempfile\nfrom spikedetekt2.dataio import Experiment\n\n# get the basename (filename without the extension)\nbasename = os.path.splitext(filename)[0]\n\n# Create a temporary working folder where we're going to run KK.\ntmpdir = tempfile.mkdtmp()\ncurdir = os.getpwd()\nos.chdir(tmpdir)\n\n# Create the filenames of the .fet and .fmask files to create.\nfilename_fet = os.path.join(tmpdir, basename + '.fet')\nfilename_fmask = os.path.join(tmpdir, basename + '.fmask')\nfilename_clu = os.path.join(tmpdir, basename + '.clu')\n\nwith Experiment(filename) as exp: # Open in read-only, close the file at the end of the block\n # Load all features and masks in memory.\n # WARNING: this might consume to much Ram ==> need to do it by chunks.\n fm = exp.channel_groups[channel_group].spikes.features_masks[:]\n # fm is a Nspikes x Nfeatures x 2 array (features AND masks)\n fet = fm[:,:,0]\n fmask = fm[:,:,1]\n # Convert to .fet and .fmask.\n # These functions are in (old) spikedetekt.files\n write_fet(fet, filename_fet)\n write_mask(fmask, filename_fmask, fmt=\"%f\")\n\n# Sort out the KK parameters.\nopt = ' '.join(['-{k}={v}'.format(k=k, v=v) for k, v in params.iteritems()])\n\n# Call KK\nos.system(\"klustakwik {fn} {opt}\".format(fn=basename, opt=opt))\n\n# Read the .clu file.\nclu = read_clu(filename_clu)\n\n# Add the clusters to the KWIK file.\nwith Experiment(filename, mode='a') as exp:\n exp.channel_groups[channel_group].spikes.clusters.original[:] = clu\n\n# Delete the temporary folder.\nshutil.rmdir(tmpdir)\n\n# Get back to the original folder.\nos.chdir(curdir)\n"
},
{
"alpha_fraction": 0.5822566747665405,
"alphanum_fraction": 0.5977605581283569,
"avg_line_length": 30.243244171142578,
"blob_id": "94add79f0712183065d89a9096c70248a8a6cecc",
"content_id": "bfc7e74f23cd2712246c3e595243d825cdc7b3e5",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 1161,
"license_type": "no_license",
"max_line_length": 88,
"num_lines": 37,
"path": "/generic/tf_sparse.py",
"repo_name": "sapphire008/Python",
"src_encoding": "UTF-8",
"text": "\"\"\"Utilities for tensorflow sparse operations.\"\"\"\n\nimport numpy as np\nimport scipy.sparse\n\nimport tensorflow as tf\nfrom tensorflow.python.framework import sparse_tensor\n\n\ndef map_values(op, *args):\n \"\"\"\n Applies the `op` to the `.values` tensor of one or more `SparseTensor`s.\n For tensorflow versions below 2.4. For versions above, use function\n `tf.sparse.map_values`.\n \"\"\"\n return sparse_tensor.SparseTensor(args[0].indices, \n op(*[a.values for a in args]),\n args[0].dense_shape)\n\ndef test_map_values():\n w = scipy.sparse.random(10, 10, density=0.1, format=\"coo\", random_state=42)\n indices = np.c_[w.row, w.col]\n values = w.data\n W = tf.sparse.SparseTensor(indices, tf.cast(values, \"float32\"), dense_shape=w.shape)\n W = tf.sparse.reorder(W)\n \n W_mapped = tf.map_fn(\n lambda x: map_values(tf.cumsum, x), W)\n \n tf_out = tf.sparse.to_dense(W_mapped)\n dense_out = np.cumsum(w.toarray(), axis=1) * (w.toarray() > 0)\n \n \n assert np.all(np.abs(tf_out - dense_out) < 1E-3)\n\nif __name__ == '__main__':\n test_map_values()\n \n"
},
{
"alpha_fraction": 0.7042889595031738,
"alphanum_fraction": 0.7246049642562866,
"avg_line_length": 30.660715103149414,
"blob_id": "5b9c766d6627579e195cc87835ef7b0032b283ed",
"content_id": "13524fca6dc45c839fb3a49480e25b48792846ed",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 1772,
"license_type": "no_license",
"max_line_length": 80,
"num_lines": 56,
"path": "/fMRI_pipeline/realign4Dnii.py",
"repo_name": "sapphire008/Python",
"src_encoding": "UTF-8",
"text": "#! /hsgs/projects/jhyoon1/pkg64/pythonpackages/anaconda/bin/python\n\n# -*- coding: utf-8 -*-\n\n'''\nCreated on Thu Feb 12 19:09:21 2014\n@author: Edward Cui\n'''\n\n# Directory of the python package\nPYTHONPACKAGEPATH = '/usr/share/pyshared/'\n \n \n# Python 2/3 compatibility\n#from __future__ import print_function \nimport sys,os\n# Add pyton package path\nsys.path.append(PYTHONPACKAGEPATH)\n# Import function to join paths\n#from os.path import join as pjoin\n#from os.path import split as psplit, abspath\n#import numpy as np\nfrom nipy.algorithms.registration.groupwise_registration import SpaceTimeRealign\nfrom nipy import load_image, save_image\nfrom nipy.utils import example_data\n\n# Input images are provided with the nipy-data package\nrunnames = [example_data.get_filename('fiac', 'fiac0', run + '.nii.gz')\n for run in ('run1', 'run2')]\nruns = [load_image(run) for run in runnames]\n\n# Spatio-temporal realigner assuming interleaved ascending slice order\nR = SpaceTimeRealign(runs, tr=2.5, slice_times='asc_alt_2', slice_info=2)\n\n# If you are not sure what the above is doing, you can alternatively\n# declare slice times explicitly using the following equivalent code\n\"\"\"\ntr = 2.5\nnslices = runs[0].shape[2]\nslice_times = (tr / float(nslices)) *\\\n np.argsort(range(0, nslices, 2) + range(1, nslices, 2))\nprint('Slice times: %s' % slice_times)\nR = SpaceTimeRealign(runs, tr=tr, slice_times=slice_times, slice_info=2)\n\"\"\"\n\n# Estimate motion within- and between-sessions\nR.estimate(refscan=None)\n\n# Resample data on a regular space+time lattice using 4d interpolation\n# Save images\ncwd = abspath(os.getcwd())\nprint('Saving results in: %s' % cwd)\nfor i in range(len(runs)):\n corr_run = R.resample(i)\n fname = 'ra' + psplit(runnames[i])[1]\n save_image(corr_run, fname)"
},
{
"alpha_fraction": 0.738726794719696,
"alphanum_fraction": 0.7559681534767151,
"avg_line_length": 43.411766052246094,
"blob_id": "6723a4870b752c9a17e055712605cd79e0499cce",
"content_id": "9ca2ffdf8cd0377c8c703b43bb28f2c029faae64",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 754,
"license_type": "no_license",
"max_line_length": 89,
"num_lines": 17,
"path": "/python_tutorials/practice_notes_3_case_study.py",
"repo_name": "sapphire008/Python",
"src_encoding": "UTF-8",
"text": "# Python 3.3.0 Practice Notes\n# Day 3: November 25, 2012\n# Case Study Script\n\nopen_file = open('words.txt');#create a file handle for the text file\nopen_file.readline();#should return the first line in the document\n#>>>'aa\\n'\n#where \\n is the new line delimiter in Windows\n#in Linux, it should be \\r\\n\nopen_file.readline();#a second time calling .readline() method should read the next line\nreadLine=open_file.readline();#store the read line in a variable\n#readWord=readLine.strip();#this should strip the annoying \\n delimiter in Python 2\nprint(readWord);\n#However, in Python 3, it looks like once .readline is stored in a variable\n#it automatically strip the delimiter, thus .strip is not available / useless in Python 3\n\n#This concludes the case study"
},
{
"alpha_fraction": 0.5476933121681213,
"alphanum_fraction": 0.55122309923172,
"avg_line_length": 38.81045913696289,
"blob_id": "26a0c20a494bcf7b4ac23c3a03e051a8c3e4fb07",
"content_id": "fb9a9d3bc050a11d69f55d90a88777142ca6292c",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 12182,
"license_type": "no_license",
"max_line_length": 137,
"num_lines": 306,
"path": "/Spikes/spikedetekt2/spikedetekt2/core/script.py",
"repo_name": "sapphire008/Python",
"src_encoding": "UTF-8",
"text": "\"\"\"Launching script.\"\"\"\n\n# -----------------------------------------------------------------------------\n# Imports\n# -----------------------------------------------------------------------------\nimport logging\nimport os\nimport sys\nimport os.path as op\nimport tempfile\nimport argparse\nimport spikedetekt2\n\nimport numpy as np\nimport tables as tb\n\nfrom kwiklib import (Experiment, get_params, load_probe, create_files, \n read_raw, Probe, convert_dtype, read_clusters,\n files_exist, add_clustering, delete_files, exception)\nfrom spikedetekt2.core import run\n\n\n# -----------------------------------------------------------------------------\n# Utility functions\n# -----------------------------------------------------------------------------\ndef _load_files_info(prm_filename, dir=None):\n dir_, filename = op.split(prm_filename)\n dir = dir or dir_\n basename, ext = op.splitext(filename)\n if ext == '':\n ext = '.prm'\n prm_filename = op.join(dir, basename + ext)\n assert op.exists(prm_filename)\n \n # Load PRM file.\n prm = get_params(prm_filename)\n nchannels = prm.get('nchannels')\n assert nchannels > 0\n \n # Find PRB path in PRM file, and load it\n prb_filename = prm.get('prb_file')\n if not op.exists(prb_filename):\n prb_filename = op.join(dir, prb_filename)\n prb = load_probe(prb_filename)\n\n \n # Find raw data source.\n data = prm.get('raw_data_files')\n if isinstance(data, basestring):\n if data.endswith('.dat'):\n data = [data]\n if isinstance(data, list):\n for i in range(len(data)):\n if not op.exists(data[i]):\n data[i] = op.join(dir, data[i])\n \n experiment_name = prm.get('experiment_name')\n \n return dict(prm=prm, prb=prb, experiment_name=experiment_name, nchannels=nchannels,\n data=data, dir=dir) \n \ndef is_exe(fpath):\n return os.path.isfile(fpath) and os.access(fpath, os.X_OK)\n\ndef which(program):\n fpath, fname = os.path.split(program)\n if fpath:\n if is_exe(program):\n return program\n else:\n for path in os.environ[\"PATH\"].split(os.pathsep):\n path = path.strip('\"')\n exe_file = os.path.join(path, program)\n if is_exe(exe_file):\n return exe_file\n\n return None\n \ndef print_path():\n print '\\n'.join(os.environ[\"PATH\"].split(os.pathsep))\n \ndef check_path():\n prog = 'klustakwik'\n if not (which(prog) or which(prog + '.exe')):\n print(\"Error: '{0:s}' is not in your system PATH\".format(prog))\n return False\n return True\n\n\n# -----------------------------------------------------------------------------\n# SpikeDetekt\n# -----------------------------------------------------------------------------\ndef run_spikedetekt(prm_filename, dir=None, debug=False, convert_only=False):\n info = _load_files_info(prm_filename, dir=dir)\n experiment_name = info['experiment_name']\n prm = info['prm']\n prb = info['prb']\n data = info['data']\n dir = dir or info['dir']\n nchannels = info['nchannels']\n \n # Make sure spikedetekt does not run if the .kwik file already exists\n # (i.e. prevent running it twice on the same data)\n assert not files_exist(experiment_name, dir=dir, types=['kwik']), \"The .kwik file already exists, please use the --overwrite option.\"\n \n # Create files.\n create_files(experiment_name, dir=dir, prm=prm, prb=prb, \n create_default_info=True, overwrite=False)\n \n # Run SpikeDetekt.\n with Experiment(experiment_name, dir=dir, mode='a') as exp:\n # Avoid reopening the KWD file if it's already opened.\n if isinstance(data, str) and data.endswith('kwd'):\n data = exp._files['raw.kwd']\n run(read_raw(data, nchannels=nchannels), \n experiment=exp, prm=prm, probe=Probe(prb),\n _debug=debug, convert_only=convert_only)\n\n\n# -----------------------------------------------------------------------------\n# KlustaKwik\n# -----------------------------------------------------------------------------\ndef write_mask(mask, filename, fmt=\"%f\"):\n with open(filename, 'w') as fd:\n fd.write(str(mask.shape[1])+'\\n') # number of features\n np.savetxt(fd, mask, fmt=fmt)\n\ndef write_fet(fet, filepath):\n with open(filepath, 'w') as fd:\n #header line: number of features\n fd.write('%i\\n' % fet.shape[1])\n #next lines: one feature vector per line\n np.savetxt(fd, fet, fmt=\"%i\")\n\ndef save_old(exp, shank, dir=None):\n chg = exp.channel_groups[shank]\n \n # Create files in the old format (FET and FMASK)\n fet = chg.spikes.features_masks[...]\n if fet.ndim == 3:\n masks = fet[:,:,1] # (nsamples, nfet)\n fet = fet[:,:,0] # (nsamples, nfet)\n else:\n masks = None\n res = chg.spikes.time_samples[:]\n \n times = np.expand_dims(res, axis =1)\n masktimezeros = np.zeros_like(times)\n \n fet = convert_dtype(fet, np.int16)\n fet = np.concatenate((fet, times),axis = 1)\n mainfetfile = os.path.join(dir, exp.name + '.fet.' + str(shank))\n write_fet(fet, mainfetfile)\n \n if masks is not None:\n fmasks = np.concatenate((masks, masktimezeros),axis = 1)\n fmaskfile = os.path.join(dir, exp.name + '.fmask.' + str(shank))\n write_mask(fmasks, fmaskfile, fmt='%f')\n \ndef run_klustakwik(filename, dir=None, **kwargs):\n # Open the KWIK files in append mode so that we can write the clusters.\n with Experiment(filename, dir=dir, mode='a') as exp:\n name = exp.name\n shanks = exp.channel_groups.keys()\n \n # Set the KlustaKwik parameters.\n params = dict()\n for key, value in kwargs.iteritems():\n if key == 'maskstarts' or key == 'maxpossibleclusters':\n print (\"\\nERROR: All PRM KlustaKwik parameters must now be prefixed by KK_ or they will be ignored.\"\n \"\\nSee https://github.com/klusta-team/example/blob/master/params.prm for an example.\"\n \"\\nPlease update or comment out the parameters to use the defaults, then re-run with klusta --cluster-only.\")\n return False\n \n if key[:3] == 'kk_':\n params[key[3:]] = value\n \n # Check for conditions which will cause KK to fail.\n if not (params.get('maskstarts', 500) <= params.get('maxpossibleclusters', 1000)):\n print \"\\nERROR: Condition not met: MaskStarts <= MaxPossibleClusters.\"\n return False\n \n if (((params.get('maskstarts', 500) == 0) or (params.get('usedistributional', 1) == 0)) and not\n (params.get('minclusters', 100) <= params.get('maxclusters',110) <= params.get('maxpossibleclusters', 1000))):\n print \"\\nERROR: Condition not met: MinClusters <= MaxClusters <= MaxPossibleClusters.\"\n return False\n \n # Switch to temporary directory.\n start_dir = os.getcwd()\n tmpdir = os.path.join(start_dir, '_klustakwik')\n if not os.path.exists(tmpdir):\n os.mkdir(tmpdir)\n os.chdir(tmpdir)\n \n for shank in shanks:\n # chg = exp.channel_groups[shank] \n save_old(exp, shank, dir=tmpdir)\n \n # Generate the command for running klustakwik.\n cmd = 'klustakwik {name} {shank} {params}'.format(\n name=name,\n shank=shank,\n params=' '.join(['-{key} {val}'.format(key=key, val=str(val))\n for key, val in params.iteritems()]),\n )\n \n # Save a file with the KlustaKwik run script so user can manually re-run it if it aborts (or edit)\n script_filename = \"runklustakwik_\" + str(shank) + \".sh\"\n scriptfile = open(script_filename, \"w\")\n scriptfile.write(cmd)\n scriptfile.close()\n \n # Run KlustaKwik.\n os.system(cmd)\n \n # Read back the clusters.\n clu_filename = name + '.clu.' + str(shank)\n \n if not os.path.exists(clu_filename):\n print \"\\nERROR: Couldn't open the KlustaKwik output file {0}\".format(clu_filename)\n print (\"This is probably due to KlustaKwik not completing successfully. Please check for messages above.\\n\"\n \"You can re-run KlustaKwik by calling klusta with the --cluster-only option. Please verify the\\n\"\n \"printed parameters carefully, and if necessary re-run with the default KlustaKwik parameters.\\n\"\n \"Common causes include running out of RAM or not prefixing the PRM file KlustaKwik parameters by KK_.\")\n return False\n \n clu = read_clusters(clu_filename)\n \n # Put the clusters in the kwik file.\n add_clustering(exp._files, channel_group_id=str(shank), name='original',\n spike_clusters=clu, overwrite=True)\n add_clustering(exp._files, channel_group_id=str(shank), name='main',\n spike_clusters=clu, overwrite=True)\n \n # Switch back to original dir.\n os.chdir(start_dir)\n \n\n# -----------------------------------------------------------------------------\n# All-in-one script\n# -----------------------------------------------------------------------------\ndef run_all(prm_filename, dir=None, debug=False, overwrite=False, \n runsd=True, runkk=True, convert_only=False):\n \n if not os.path.exists(prm_filename):\n exception(\"The PRM file {0:s} doesn't exist.\".format(prm_filename))\n return\n \n info = _load_files_info(prm_filename, dir=dir)\n experiment_name = info['experiment_name']\n prm = info['prm']\n prb = info['prb']\n data = info['data']\n nchannels = info['nchannels']\n \n if files_exist(experiment_name, dir=dir) & runsd == True:\n if overwrite:\n delete_files(experiment_name, dir=dir, types=('kwik', 'kwx', 'high.kwd', 'low.kwd'))\n else:\n print((\"\\nERROR: A .kwik file already exists. To overwrite, call klusta with the --overwrite option,\\n\"\n \"which will overwrite existing .kwik, .kwx, .high.kwd, and .low.kwd files, or delete them manually first.\"))\n return False \n if runsd:\n run_spikedetekt(prm_filename, dir=dir, debug=debug, convert_only=convert_only)\n if runkk:\n run_klustakwik(experiment_name, dir=dir, **prm)\n \ndef main():\n \n if not check_path():\n return\n \n parser = argparse.ArgumentParser(description='Run SpikeDetekt and/or KlustaKwik.')\n parser.add_argument('prm_file',\n help='.prm filename')\n parser.add_argument('--debug', action='store_true', default=False,\n help='run the first few seconds of the data for debug purposes')\n parser.add_argument('--overwrite', action='store_true', default=False,\n help='overwrite the KWIK files if they already exist')\n \n parser.add_argument('--detect-only', action='store_true', default=False,\n help='run only SpikeDetekt')\n parser.add_argument('--cluster-only', action='store_true', default=False,\n help='run only KlustaKwik (after SpikeDetekt has run)')\n parser.add_argument('--convert-only', action='store_true', default=False,\n help='only convert raw data to Kwik format, no spike detection')\n parser.add_argument('--version', action='version', version='Klusta-Suite version {0:s}'.format(spikedetekt2.__version__)) \n\n\n args = parser.parse_args()\n\n runsd, runkk, convert_only = True, True, False\n\n if args.detect_only:\n runkk = False\n if args.cluster_only:\n runsd = False\n if args.convert_only:\n runkk = False\n convert_only = True\n \n run_all(args.prm_file, debug=args.debug, overwrite=args.overwrite,\n runsd=runsd, runkk=runkk, convert_only=convert_only)\n \nif __name__ == '__main__':\n main()\n"
},
{
"alpha_fraction": 0.5705219507217407,
"alphanum_fraction": 0.5870985984802246,
"avg_line_length": 37.67033004760742,
"blob_id": "58b30f13d8ae2cffa5576b97e8a9c68257b7f9d4",
"content_id": "e552575dda209cbbae5f1560df9b6ed3ba21b604",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 10557,
"license_type": "no_license",
"max_line_length": 215,
"num_lines": 273,
"path": "/EMCNA/ExtractSNP.py",
"repo_name": "sapphire008/Python",
"src_encoding": "UTF-8",
"text": "# -*- coding: utf-8 -*-\n\"\"\"\nCall SNPs for EM\n\"\"\"\n\nimport csv, os #, pickle\nfrom optparse import OptionParser\n#from operator import itemgetter\n\n# These environment variables point to the location of the tools,\n# created temporarily by the script calls upon it.\n# Can also be set permanently with an absolute path\nsamtools = os.getenv(\"PATH_TO_SAMTOOLS\")#\"/mnt/NFS/homeG2/LaFramboiseLab/dxc430/Software/samtools-1.1/samtools\"\nbcftools = os.getenv(\"PATH_TO_BCFTOOLS\")#\"/mnt/NFS/homeG2/LaFramboiseLab/dxc430/Software/bcftools-1.1/bcftools\"\nSnpSift = os.getenv(\"PATH_TO_SNPSIFT\")#\"/mnt/NFS/homeG2/LaFramboiseLab/dxc430/Software/snpEff/SnpSift.jar\"\n\n#os.system(\"echo %s\" %(samtools))\n#os.system(\"echo %s\" %(bcftools))\n#os.system(\"echo %s\" %(SnpSift))\n\n\"\"\"\nParse optional arguments to the script\n\"\"\"\nusage = \"usage: %prog [options] ref_genome norm_bam tum_bam result_dir\"\nparser = OptionParser(usage)\nparser.add_option(\"-r\",\"--chr2user\", dest=\"chr2use\", help=\"chromosome to use, e.g. chr7\", default=None)\nparser.add_option(\"-s\",\"--step\", dest=\"continue_at\", help=\"stepper, used in case program crash; continue at step: 1. Call SNPs; 2. Make bedfiles; 3.Pileup; 4. Counting Reads; 5. Consolidation; Default 0\", default=0)\nparser.add_option(\"-n\",\"--normal_only\", dest=\"call_normal_only\", help=\"only call normal vcf, all subsequent call follows normal heterozygosity\", action=\"store_true\")\noptions, args = parser.parse_args()\nif len(args) != 4:\n parser.error(\"Wrong number of arguments\")\n parser.print_help()\nref_genome = args[0]\nbam_files = args[1:3]\nresult_dir = args[3]\ncontinue_at= options.continue_at\nif options.chr2use is not None:\n chr2use = \"-r \" + options.chr2use\nelse:\n chr2use = \"\"\n#os.system(\"echo here\")\n# os.system(\"echo %s\" %options.chr2use)\n#print(options.call_normal_only)\n#import sys\n#sys.exit()\n# create index files if does not exist\nos.system(\"mkdir -p %s\" %(result_dir))\nfor i, bam in enumerate(bam_files):\n if not os.path.isfile(bam+\".bai\"):\n tmp_bam = os.path.join(result_dir,os.path.basename(bam))\n os.system(\"ln -s %s %s\" %(bam, tmp_bam))\n cmd = \"%s index %s\" %(samtools, tmp_bam)\n bam_files[i] = tmp_bam\n os.system(cmd)\n\n# Step 1: Call SNPs, make snp.vcf files\nvcf_files = []\nprint(\"Step 1: Call SNPs ...\")\nfor i, bam in enumerate(bam_files):\n vcf_files.append(os.path.join(result_dir, os.path.basename(bam).split(\".\")[0]+\".vcf\"))\n if continue_at > 1:\n print(\"Skip\")\n continue\n if i>1 and options.call_normal_only:\n break\n if not os.path.isdir(result_dir):\n os.system('mkdir %s' % result_dir)\n cmd = '%s mpileup -v -I -B -C 50 -q 40 -Q 30 -f %s %s %s | %s call -v -c - | java -jar %s filter \"isHet( GEN[0] )\" > %s' %(samtools, ref_genome, chr2use, bam, bcftools, SnpSift, vcf_files[i])\n print(cmd)\n os.system(cmd)\n\n# Step 2: Make bedfiles and chromomosome dictionaries\nbed_files = []\nchrom_dict =[]\nprint(\"Step 2: Make bedfiles and chromosome dictionaries ...\")\nfor i, vcf in enumerate(vcf_files):\n bed_files.append(os.path.join(result_dir, os.path.basename(vcf).split(\".\")[0]+\".bed\"))\n if continue_at <= 2:\n fh = open(bed_files[i], 'wb') # <-- bypass\n fh_write = csv.writer(fh, delimiter='\\t') # <-- bypass\n else:\n print(\"Skip\")\n chrom = {}\n with open(vcf, \"rb\") as csvfile:\n fid = csv.reader(csvfile, delimiter='\\t')\n for i, row in enumerate(fid):\n if row[0][0] == \"#\":\n continue\n my_row = [row[0], int(row[1])-1, int(row[1])]\n if continue_at <= 2:\n fh_write.writerow(my_row) # <-- bypass\n chrom[(row[0], row[1])] = row[3], row[4]\n csvfile.close()\n if continue_at <= 2:\n fh.close() # <-- bypass\n chrom_dict.append(chrom)\n#with open(os.path.join(result_dir,\"chrom_dict.pkl\"), 'wb') as pickle_save:\n# pickle.dump(chrom_dict, pickle_save)\n\n# Step 3: pileup\npileup_files = []\nprint(\"Step 3: Pileup ...\")\nfor i, bam in enumerate(bam_files):\n # broadcast bed files to generate pileups for each bam file\n pileup_files.append(os.path.join(result_dir,os.path.basename(bam).split(\".\")[0] + \".pileup\"))\n if continue_at > 3:\n print(\"Skip\")\n continue\n if len(bed_files) == 1:\n bi = 0\n else:\n bi = i\n cmd = \"%s mpileup -B -C 50 -q 40 -Q 30 -f %s %s -l %s %s > %s\" %(samtools, ref_genome, chr2use, bed_files[bi], bam, pileup_files[i])\n print(cmd)\n os.system(cmd) # <-- bypass\n\n# counts the alleles from pileup\ndef count_alleles(test_str, ref):\n count_dict = {\"A\":0, \"T\":0, \"C\":0, \"G\":0, \"IN\":0, \"DEL\":0}\n pm_prev = False #keeps track of whether it is in indel\n indel_count = 0\n\n for let in test_str:\n if pm_prev == True:\n indel_count = int(let)\n pm_prev = False\n elif indel_count > 0:\n indel_count -= 1\n continue\n else:\n if let in '.,':\n count_dict[ref] += 1\n elif let == '$':\n continue\n elif let in \"-+\":\n pm_prev = True\n if let in '-':#AFTER ANALYSIS, indels count for ref as well!\n count_dict[\"DEL\"] += 1 #doesn't matter if in or del\n else:\n count_dict[\"IN\"] += 1\n elif let == \"^\":\n indel_count = 1\n elif let == \"*\":\n count_dict[\"DEL\"] += 1 #see samtools doct (not format pg)\n elif let in \"ACGTacgt\" and indel_count == 0:\n upper_let = let.upper()\n count_dict[upper_let] += 1\n else:\n #pass\n print test_str\n print \"fell through... check into this\"\n\n return count_dict\n\n# Step 4: Counting Reads\n#pileup_files=[\"C:\\Users\\Edward\\Desktop\\NGS13_out6_hg19.pileup\",\n#\"C:\\Users\\Edward\\Desktop\\NGS14_out6_hg19.pileup\"]\nprint(\"Step 4: Counting Reads ...\")\ntsv_files = []\nfor i, f in enumerate(pileup_files):\n tsv_files.append(f.replace(\".pileup\", \".tsv\"))\n if continue_at > 4:\n print(\"Skip\")\n continue\n infile_pile = open(f, \"rb\")\n intabfile_pile = csv.reader(infile_pile, delimiter = '\\t')\n outfile = open(tsv_files[i], \"wb\") #<-- bypass\n outtabfile = csv.writer(outfile, delimiter = '\\t') #<--bypass\n\n print \"opening \" + f\n\n rows = []\n\n #make dict for allele values from pileup\n #since it is from the pileup, never gonna be missing a value in the try except part\n allele_dict = {}\n if len(vcf_files) == 1:\n bi = 0\n else:\n bi = i\n\n for j, pile_line in enumerate(intabfile_pile):\n chrom, position, reference, = pile_line[0], pile_line[1], pile_line[2]\n ct_dict = count_alleles(pile_line[4], pile_line[2].upper())\n temp = [chrom, position, chrom_dict[bi][chrom, position][0], chrom_dict[bi][chrom,position][1]] # chrom, position, reference, alternative\n try:\n temp += [ct_dict[k] for k in ['A','T','C','G','IN','DEL']]\n except:\n # print \"missing a value\"\n temp += [\".\"]*6\n allele_dict[(chrom, position)] = temp\n\n for key, value in allele_dict.iteritems():\n rows.append(value)\n\n outtabfile.writerows(rows) #<--bypass\n\n #infile.close()\n infile_pile.close()\n outfile.close() #<--bypass\n\n#tsv_files = [\"C:\\Users\\Edward\\Desktop\\snp_norm11.pileup.norm_tum_11_12_snp.tsv\",\n#\"C:\\Users\\Edward\\Desktop\\snp_tum12.pileup.norm_tum_11_12_snp.tsv\"]\n\n# Step 5: Consolidate\nvariant_dict = {}\nsnp_dict = {}\nprint(\"Step 5: Consolidating ...\")\nfor i, extract_file in enumerate(tsv_files):\n print \"Consolidating \" + extract_file\n infile = open(extract_file, \"rb\")\n intabfile = csv.reader(infile, delimiter = '\\t')\n # write a description line at the top for understanding\n\n #extract the sample information from the two types of files: assuming the\n #first file specified is normal, and second file specified is tumor\n if i == 0:\n mytype = \"normal\"\n else:\n mytype = \"tumor\"\n\n for line in intabfile:\n chrom, pos, ref_base, alt_base, a_ad, t_ad, c_ad, g_ad, in_ad, del_ad = line\n key = chrom, int(pos)\n val_var = [ref_base, alt_base, a_ad, t_ad, c_ad, g_ad, in_ad, del_ad]\n base_dict = {'A':a_ad, 'T':t_ad,'C':c_ad,'G':g_ad, 'IN':in_ad, 'DEL':del_ad}\n snp_row = [ref_base, alt_base, base_dict[ref_base], str(sum([int(base_dict[bs]) for bs in alt_base.split(',')]))]\n if key in variant_dict:\n if mytype == \"normal\":\n variant_dict[key] = val_var + variant_dict[key][8:]\n snp_dict[key] = snp_row + snp_dict[key][4:]\n else: #tumor\n variant_dict[key] = variant_dict[key][:8] + val_var\n snp_dict[key]= snp_dict[key][:4] + snp_row\n else:\n if mytype == \"normal\":\n variant_dict[key] = val_var + [\".\"]*8\n snp_dict[key] = snp_row + [\".\"]*4\n else:\n variant_dict[key] = [\".\"]*8 + val_var\n snp_dict[key] = [\".\"]*4 + snp_row\n\n infile.close()\n#with open(os.path.join(result_dir,\"variant_dict.pkl\"), 'wb') as variant_pickle:\n# pickle.dump(variant_dict, variant_pickle)\n#with open(os.path.join(result_dir,\"snp_dict.pkl\"), 'wb') as snp_pickle:\n# pickle.dump(snp_dict, snp_pickle)\n\n#filters for variants only present in both... Should it be only one?\ndef snp_dict2csv(mydict, target_dir, header = [], row_filter = '.'):\n # sort the dictionary first\n with open(target_dir, \"wb\") as csvfile:\n outfile = csv.writer(csvfile, delimiter = '\\t')\n if not header:\n outfile.writerows([header])\n for key, val in sorted(mydict.iteritems()): # filter out any entries that have missing SNPs\n if any(a == row_filter for a in val):\n continue\n outfile.writerow(list(key)+val)\n csvfile.close()\n\n\nheader = [\"chr\", \"pos\", \"ref_norm\", \"alt_norm\", \"A_norm\",\"T_norm\",\"C_norm\",\"G_norm\",\"INS_norm\",\"DEL_norm\",\n \"ref_norm\", \"alt_tum\", \"A_tum\", \"T_tum\", \"C_tum\", \"G_tum\", \"INS_tum\", \"DEL_tum\"]\nsnp_dict2csv(variant_dict, os.path.join(result_dir, \"shared_snp.txt\"), header)\n\nheader = [\"chr\",\"pos\",\"ref_norm\", \"alt_norm\", \"ref_norm_count\", \"alt_norm_count\",\n \"ref_tum\",\"alt_tum\", \"ref_tum_count\", \"alt_tum_count\"]\nsnp_dict2csv(snp_dict, os.path.join(result_dir, \"shared_snp_count.txt\"), header)\n\n\n#mydict = {('ch3','23234'):1,('ch3','232d434'):3,('ch4','839293'):2,('ch1','339293'):2}\n#for k, v in sorted(mydict.iteritems()):\n"
},
{
"alpha_fraction": 0.636663019657135,
"alphanum_fraction": 0.6520307064056396,
"avg_line_length": 26.53125,
"blob_id": "c014d0767800b66e0da048be75a66f00bcb14496",
"content_id": "98a165364aa6f0106c7f87b3d59954a4c4c99eeb",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 911,
"license_type": "no_license",
"max_line_length": 64,
"num_lines": 32,
"path": "/System/Win_VLC_icon.py",
"repo_name": "sapphire008/Python",
"src_encoding": "UTF-8",
"text": "# -*- coding: utf-8 -*-\r\n\"\"\"\r\nCreated on Thu Oct 15 17:56:32 2015\r\nChange VLC multimedia icons\r\n@author: Edward\r\n\"\"\"\r\n\r\nimport subprocess\r\nimport re\r\n\r\n# Command to list registry key queries\r\ncmd = \"REG QUERY HKCR /f VLC.\"\r\n# Start the process and call Command\r\nVLC = subprocess.check_output(cmd)\r\n# filter out invalid results\r\nVLC = filter(None,VLC.split('\\r\\n'))\r\nVLC = [x for x in VLC if 'VLC' in x]\r\n# Get a list of supported extension\r\nVLCext = [re.findall('VLC.(\\w+)', x) for x in VLC]\r\nVLCext = [a[0] for a in VLCext]\r\n\r\n# Command to change the icon\r\ncmd = \"REG ADD %s\\\\DefaultIcon /f /v (Default) /t REG_SZ /d %s\"\r\n\r\n# Get a list of icon files: needs modification\r\niconfile=[\"VLC.\"+a+\".ico\" for a in VLCext]\r\n\r\nfor n, ext in enumerate(VLCext):\r\n # Get the corresponding icon file\r\n ico = [i for i in iconfile if ext in i]\r\n # Reassign the icon file name\r\n subprocess.call(cmd%(VLC[n], ico))"
},
{
"alpha_fraction": 0.5815715789794922,
"alphanum_fraction": 0.585161566734314,
"avg_line_length": 38.77777862548828,
"blob_id": "7fe9cecf8a6e182ffdc95920599f1f0fac2064af",
"content_id": "308e3cc18be881f20e21190ed57c10edc0cffbf7",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 2507,
"license_type": "no_license",
"max_line_length": 80,
"num_lines": 63,
"path": "/Spikes/spikedetekt2/spikedetekt2/processing/threshold.py",
"repo_name": "sapphire008/Python",
"src_encoding": "UTF-8",
"text": "\"\"\"Thresholding routines.\"\"\"\n\n# -----------------------------------------------------------------------------\n# Imports\n# -----------------------------------------------------------------------------\nfrom collections import namedtuple\n\nimport numpy as np\nfrom scipy import signal\n\nfrom spikedetekt2.processing import apply_filter\n\nDoubleThreshold = namedtuple('DoubleThreshold', ['strong', 'weak'])\n\n\n# -----------------------------------------------------------------------------\n# Thresholding\n# -----------------------------------------------------------------------------\ndef get_threshold(raw_data, filter=None, channels=slice(None), **prm):\n \"\"\"Compute the threshold from the standard deviation of the filtered signal\n across many uniformly scattered excerpts of data.\n \n threshold_std_factor can be a tuple, in which case multiple thresholds\n are returned.\n \n \"\"\"\n nexcerpts = prm.get('nexcerpts', None)\n excerpt_size = prm.get('excerpt_size', None)\n use_single_threshold = prm.get('use_single_threshold', True)\n threshold_strong_std_factor = prm.get('threshold_strong_std_factor', None)\n threshold_weak_std_factor = prm.get('threshold_weak_std_factor', None)\n threshold_std_factor = prm.get('threshold_std_factor', \n (threshold_strong_std_factor, threshold_weak_std_factor))\n \n if isinstance(threshold_std_factor, tuple):\n # Fix bug with use_single_threshold=False: ensure that \n # threshold_std_factor has 2 dimensions (threshold_weak_strong, channel)\n threshold_std_factor = np.array(threshold_std_factor)[:,None]\n \n # We compute the standard deviation of the signal across the excerpts.\n # WARNING: this may use a lot of RAM.\n excerpts = np.vstack(\n # Filter each excerpt.\n apply_filter(excerpt.data[:,:], filter=filter)\n for excerpt in raw_data.excerpts(nexcerpts=nexcerpts, \n excerpt_size=excerpt_size))\n \n # Get the median of all samples in all excerpts,\n # on all channels...\n if use_single_threshold:\n median = np.median(np.abs(excerpts))\n # ...or independently for each channel.\n else:\n median = np.median(np.abs(excerpts), axis=0)\n \n # Compute the threshold from the median.\n std = median / .6745\n threshold = threshold_std_factor * std\n \n if isinstance(threshold, np.ndarray):\n return DoubleThreshold(strong=threshold[0], weak=threshold[1])\n else:\n return threshold\n\n"
},
{
"alpha_fraction": 0.3065723776817322,
"alphanum_fraction": 0.3427620530128479,
"avg_line_length": 37.015872955322266,
"blob_id": "b296b1f5d3f7dd5e1f5bb2e780eaf4b52fa8f826",
"content_id": "ad2de1bc11227bb72c88dfdc10aa4026ff7cc7bf",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 2404,
"license_type": "no_license",
"max_line_length": 79,
"num_lines": 63,
"path": "/Spikes/spikedetekt2/spikedetekt2/processing/tests/test_waveform.py",
"repo_name": "sapphire008/Python",
"src_encoding": "UTF-8",
"text": "\"\"\"Alignment tests.\"\"\"\n\n# -----------------------------------------------------------------------------\n# Imports\n# -----------------------------------------------------------------------------\nimport numpy as np\n\nfrom spikedetekt2.processing import extract_waveform, Component\nfrom kwiklib.utils import Probe\n\n\n# -----------------------------------------------------------------------------\n# Test probe\n# -----------------------------------------------------------------------------\nn = 5\nprobe_adjacency_list = [(i, i+1) for i in range(n-1)]\nPROBE = Probe({0: {'channels': range(n),\n 'graph': probe_adjacency_list}})\n\n\n# -----------------------------------------------------------------------------\n# Test component\n# -----------------------------------------------------------------------------\nCHUNK_WEAK = np.array([\n [0, 0, 0, 0, 0],\n [0, 0, 1, 0, 0],\n [1, 0, 1, 1, 0],\n [1, 0, 0, 1, 0],\n [0, 1, 0, 1, 1],\n ])\nCHUNK_STRONG = np.array([\n [0, 0, 0, 0, 0],\n [0, 0, 0, 0, 0],\n [0, 0, 1, 0, 0],\n [0, 0, 0, 0, 0],\n [0, 0, 0, 0, 0],\n ])\nCOMPONENT = np.array([(1, 2), (2, 2), (2, 3), \n (3, 3), (4, 3), (4, 4)])\nTHRESHOLD_STRONG = .8\nTHRESHOLD_WEAK = .5\nCHUNK_EXTRACT = THRESHOLD_WEAK * np.random.rand(5, 5)\nCHUNK_EXTRACT[COMPONENT[:,0], COMPONENT[:,1]] = THRESHOLD_WEAK + \\\n (1-THRESHOLD_WEAK) * np.random.rand(COMPONENT.shape[0])\nCHUNK_EXTRACT[2, 2] = (1 + THRESHOLD_STRONG) / 2.\n\n\n# -----------------------------------------------------------------------------\n# Tests\n# -----------------------------------------------------------------------------\ndef test_extract_waveform_1():\n waveform = extract_waveform(Component(COMPONENT),\n chunk_extract=CHUNK_EXTRACT,\n chunk_fil=CHUNK_EXTRACT,\n chunk_raw=CHUNK_EXTRACT,\n extract_s_before=1,\n extract_s_after=2,\n threshold_strong=THRESHOLD_STRONG,\n threshold_weak=THRESHOLD_WEAK, \n probe=PROBE)\n assert waveform.raw.shape == (3, 5)\n assert waveform.fil.shape == (3, 5)\n assert waveform.masks.shape == (5,)\n \n "
},
{
"alpha_fraction": 0.6289617419242859,
"alphanum_fraction": 0.654644787311554,
"avg_line_length": 31.771217346191406,
"blob_id": "297ab4fe7dbcfa3cbdb217c62e11fd1ec1d0731b",
"content_id": "5b1f4f5c33f36ad431bea86f9a7c36e03dcf8e5d",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 9150,
"license_type": "no_license",
"max_line_length": 125,
"num_lines": 271,
"path": "/python_tutorials/ThinkPython/practice_notes_3.py",
"repo_name": "sapphire008/Python",
"src_encoding": "UTF-8",
"text": "# Python 3.3.0 Practice Notes\r\n# Day 3: November 25, 2012\r\n\r\n# Strings\r\n#Note for defining a string\r\n#I personally perfer using double quotes, since single quotes are not good\r\n#when the string contains possessive- or contraction-like words, for instance\r\n#>>>ABCD='I'd like this to go';\r\n#>>> File \"<console>\", line 1\r\n#>>> ABCD='I'd like this to go';\r\n#>>> ^\r\n#>>>SyntaxError: invalid syntax\r\n#However, it is legal to do\r\n#>>>ABCD=\"I'd like this to go\";\r\n#Nonetheless, if the string do contain double quotation mark, it looks like\r\n#we still have to switch to single quotation mark.\r\n#The question would be what if the string contains both double and single quotation mark?\r\n#That is, how to define a string with such sentence: I'd like to ask \"him\" about it.\r\n#One way, perhaps is to do concatenation\r\na_word=\"asdfghjkl;\";\r\nfor i in range(0,len(a_word),1):\r\n print(i,\":\",a_word[i]);#this should print out each letter of the string stored in a_word, forward\r\n\r\n#This can be done in another way\r\nfor i in a_word:\r\n print(i);#this should print out each letter of the stringg stored in a_word, forward\r\n\r\nfor j in range(1,len(a_word)+1,1):\r\n print(-j,\":\",a_word[-j]);#this should print out each letter of the string stored in a_word, backwards\r\n\r\n#String Indexing\r\na_word[0:3];#or equivalently\r\na_word[:3];\r\n#both lines should print out the string up to its index 3-1 (total 3 letters)\r\n#>>>'asd'\r\na_word[3:len(a_word)];#or equivalently\r\na_word[3:];\r\n#both lines should print out the string from its index 3 to the end (total len(a_word)-3 letters)\r\n#>>>'fghjkl;'\r\na_word[:];#this should print out the whole string, equivalent to print a_word directly\r\n#Important Note: unlike MATLAB, string in Python are not treated as a matrix/vector\r\n#Strings in Python is immutable, meaning its elements cannot be changed\r\n#Therefore, it will be an error to write\r\n#a_word[3]=\"K\";\r\n#>>>TypeError: 'str' object does not support item assignment\r\n\r\n#String Method\r\nb_word = \"banana\";\r\nnew_b_word = b_word.upper();#this \"S.upper\" method converts all lowercase letter to uppercase letter\r\nprint(new_b_word);\r\n#>>>'BANANA'\r\nA_index = b_word.find('an',3,10);\r\nprint(A_index);\r\n#this \"S.find(substring,start,end)\" method should find the lowest index of specified substring\r\n#notice that even if the end index exceeds the length of the string,\r\n#unlike MATLAB, there will be no message indicating that index exceeds dimension\r\n#if there is no such substring within the searched string, returns -1\r\nthe_word=\"BANANANa\";\r\nthe_word.isupper();#returns true if the string ONLY contains uppercase letter\r\n#>>>False\r\n\r\n#Another note on the notation of help documentation/calltips of Python functions and methods\r\n#for instance, S.find(sub[, start[, end]])\r\n#The notation indicates that \"sub\" (since it is outside the bracket) is required,\r\n#whereas \"start\" is optional (since it is inside a bracket).\r\n#However, once start is specified, \"end\" is now optional\r\n#In another words, \"end\" cannot be specified without \"start\"\r\n\r\n#The \"in\" Operator\r\n#This operator checkes if the string specified before the \"in\" operator\r\n#is the substring of the string specified after the \"in\" operator\r\n\"a\" in \"banana\";\r\n#>>>True\r\n\"seed\" in \"banana\";\r\n#>>>False\r\n\r\n#String Comparison\r\n\"A\"<\"B\" and \"B\"<\"C\"\r\n#>>>True\r\n#Both statements are true, which makes the entire line true\r\n#Strings are compared directly as numbers\r\n#the number that each character corresponds to ASCII\r\n\"a\">\"A\"\r\n#>>>True\r\n\"b\"<\"B\"\r\n#>>>False\r\n# Just like Java, when comparing strings with multiple letters,\r\n# Python compares the first letter of each word, if they are the same,\r\n# Python goes to the second letter, and then compare them.\r\n# A list of words can be organized in such a way\r\n######################################################################################################################\r\n\r\n# Lists\r\nList_A=[\"asdf\",\"jkl;\",\"Such a good weather\"];#list of strings\r\nList_B=[1,3,4,12,234];#list of integers\r\nList_C=[];#empty list\r\nList_Mixed=[1,2.3424,\"sanskrit\",23,\"floating above\", 3.242,\"12.23\"];#mixed different types\r\n#Lists are mutable\r\nList_Mixed[2]='not a sanskrit';\r\n#>>>[1, 2.3424, 'not a sanskrit', 23, 'floating above', 3.242, '12.23']\r\n\r\n#\"in\" operator for lists\r\n\"sanskrit\" in List_Mixed\r\n#>>>False\r\n\"not a sanskrit\" in List_Mixed\r\n#>>>True\r\n\r\n#Nested Lists\r\nList_nested=[\"good\",1.234,[\"bad\",3.1234,32],[2,3,4,5,6]];\r\nList_nested[2];#return the second element of the list List_nested\r\n#>>>['bad', 3.1234, 32]\r\nList_nested[2][1];#call the index 2 element of the list List_nested, then from the returned element, call its index 1 element\r\n#>>>3.1234\r\n\r\n#List Operatoions\r\na=[1,2,3];\r\nb=[4,5,6];\r\nc=a+b;#concatenating a and b\r\nprint(c);\r\n#>>>[1,2,3,4,5,6]\r\nd=a*4;#repeat a 4 times in the new list\r\nprint(d);\r\n#>>>[1,2,3,1,2,3,1,2,3]\r\n\r\n#List indexing--very similar to string indexing\r\nf=c[:3];\r\nprint(f);\r\n#>>>[1, 2, 3]\r\nt=c[3:];\r\nprint(t);\r\n#>>>[4, 5, 6]\r\ns=c[:];\r\nprint(s);\r\n#>>>[1, 2, 3, 4, 5, 6]\r\n\r\n#List Methods\r\nt=['a','b','c'];\r\nt.append('d');#appending another element to the end of the list, void method\r\nprint(t);\r\n#>>>['a', 'b', 'c', 'd']\r\n#compare to\r\nt.append(['e','f','g']);\r\nprint(t);\r\n#>>>['a', 'b', 'c', 'd', ['e', 'f', 'g']]\r\n#To append each element of another list to a list, use extend\r\nt1=['a','b','c'];\r\nt2=['d','e','f','g'];\r\nt1.extend(t2);#appending each element of t2 to t1, void method\r\nprint(t1);\r\n#>>>['a', 'b', 'c', 'd', 'e', 'f', 'g']\r\nt=['adf','gdasdf','deas','adsff','ggas'];\r\nt.sort();#void method\r\nprint(t);#sort the list\r\n#>>>['adf', 'adsff', 'deas', 'gdasdf', 'ggas']\r\n\r\n#Map, filter, and reduce\r\n#one way to sum up all the elements in the list\r\ndef add_all(t):\r\n total=0;\r\n for x in t:\r\n total+=x;#same as JAVA, equivalent to total = total+x;\r\n return total\r\n\r\nt=[1,2,3,4,5];\r\nsum_all=add_all(t);\r\nprint(sum_all);\r\n\r\n#A simpler way to add all elements is using sum()\r\nsum(t);\r\n#Reduce: an operation that combines all the element in a list into a single value\r\n#accumulator: a variable that accumulates the result of each iteration when transversing through a list\r\n#map: an operation that \"maps\" a function to each element in a sequence\r\n\r\n#Deleting elements\r\n#If we know the index of the element\r\nt=['a','b','c','d','e'];\r\nx=t.pop(1);#returns the element being deleted, and modify t after deleting\r\n#List.pop([index]), default_index is the index of the last element\r\nprint(t);\r\n#>>>['a', 'c', 'd', 'e']\r\nprint(x);\r\n#>>>b\r\n#using del() operator gives the same effect\r\nt=['a','b','c','d','e'];\r\ndel(t[1:3]);#delete up to but not including index 3 elements, so, only index 1 and 2 are deleted\r\nprint(t);\r\n#On the other hand, if we know the element itself but not the index of it\r\nt=['a','b','c','d','e'];\r\nt.remove('b');#void method\r\nprint(t);\r\n\r\n#converting between lists and strings\r\ns=\"spam\";\r\nt=list(s);#convert each letter of s into a list of letterz\r\nprint(t);\r\n#>>>['s', 'p', 'a', 'm']\r\ns=\"pining for the fjords\";\r\nt=s.split();#S.split([sep [,maxsplit]]), default_sep = \" \" space, can set maximum number of split\r\nprint(t);\r\n#>>>['pining', 'for', 'the', 'fjords']\r\nt=['pining', 'for', 'the', 'fjords'];\r\ndelimiter=\" \";\r\ns=delimiter.join(t);#join the list of words with delimiter\r\nprint(s);\r\n\r\n# Objects and values\r\na=\"banana\";\r\nb=\"banana\";\r\na is b;#checks if two objects are identical\r\n#>>>True\r\n#This means a and b are the same objects, and of course, with the same value\r\n#However,\r\na=[1,2,3];\r\nb=[1,2,3];\r\na is b;#checks if two objects are identical\r\n#>>>False\r\n#This means that even though a and b have the same value, they are different objects\r\n#Instead, list a and list b are called \"equivalent\", whereas string a and string b are called \"identical\"\r\n\r\n#In comparison\r\na=[1,2,3];\r\nb=a;\r\na is b;\r\n#>>>True\r\n#We call a is being aliased by b\r\n#if the aliased object is mutable, then change of the aliased object affects all of its alias\r\na[2]=100;\r\nprint(a);\r\n#>>>[1, 2, 100]\r\nprint(b);\r\n#>>>[1, 2, 100]\r\n#Notice that b is also changed even though we did not modify it\r\n#This is very different from MATLAB!!!\r\n#Thi must be noted carefully when coding, since it is so error prone\r\n#The question is how to work around this. Look at the following example for some hints\r\n\r\ndef give_tail(t):\r\n return t[1:];#this returns a NEW list, with the original t unmodified\r\n\r\nt=[1,2,3,4,5,6];\r\ns=give_tail(t);\r\nprint(t);\r\n#>>>[1, 2, 3, 4, 5, 6]\r\nprint(s);\r\n#>>>[2, 3, 4, 5, 6]\r\n\r\n#The wrong way to define the function\r\ndef bad_give_tail(t):\r\n t=t[1:];#trying to reassign t, but t does not change.\r\n\r\nt=[1,2,3,4,5,6];\r\ns=bad_give_tail(t);\r\nprint(t);\r\n#>>>[1, 2, 3, 4, 5, 6]\r\nprint(s);\r\n#>>>None\r\n#In another word, without the \"return\" statement, the function is a void function\r\n#On the contraray, MATLAB does a better job at this, without the need to worry about aliasing.\r\n\r\n#To create copies of the original list, use this:\r\nt=[1,2,3,4,5,6];\r\noriginal_list=t[:];#create a copy of the original list, without aliasing\r\nt is original_list;#test to see if they are the same list\r\n#>>>False\r\nt.append(7);\r\nprint(t);\r\n#>>>[1, 2, 3, 4, 5, 6, 7]\r\nprint(original_list);\r\n#>>>[1, 2, 3, 4, 5, 6]\r\n\r\n#This concludes today's study."
},
{
"alpha_fraction": 0.6549019813537598,
"alphanum_fraction": 0.7490196228027344,
"avg_line_length": 30.875,
"blob_id": "ed3e9e477604941363b8759746ccf37e51b1ba57",
"content_id": "f8e0dda8c28d27b05dc39f31438711ecee4f263e",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "INI",
"length_bytes": 1020,
"license_type": "no_license",
"max_line_length": 158,
"num_lines": 32,
"path": "/PySynapse/resources/config.ini",
"repo_name": "sapphire008/Python",
"src_encoding": "UTF-8",
"text": "# Startup\ntheme = whiteboard\ncontinueFromLastSession = True # Not implemented\n# Export Parameters\nfigSizeW = 3 # inches\nfigSizeH = 2 # inches\nfigSizeWMulN = True\nfigSizeHMulN = True\nhSpaceType = Fixed\nhFixedSpace = 10 # fixed horizontal spacing between plots, in terms of percentage of total width\ndpi = 300 # for raster images\nlinewidth = 0.5669291338582677 # trace linewidth\nannotation = Label Only\nmonoStim = True\nshowInitVal = True\nplotStimOnce = False\nfontName = Helvetica\nfontSize = 6\nannotfontSize = 2\nsaveDir = R:\\temp.svg\ngridSpec = Vertically # only relevant for grid arrangement\nstimReflectCurrent = False\nscalebarAt = Last # only relevant for grid arrangement ['all', 'first','last','none']\ntimeRangeMin = 5500\ntimeRangeMax = 7500\nvoltRangeMin = -100\nvoltRangeMax = 50\ncurRangeMin = -500\ncurRangeMax = 15000\nstimRangeMin = -1000\nstimRangeMax = 1000\ncolors = ['#1f77b4', '#ff7f0e', '#2ca02c', '#d62728', '#9467bd', '#8c564b', '#e377c2', '#7f7f7f', '#bcbd22', '#17becf'] #1f77b4 tableau10, or odd of tableau20\n"
},
{
"alpha_fraction": 0.5802069902420044,
"alphanum_fraction": 0.6093143820762634,
"avg_line_length": 26.10909080505371,
"blob_id": "a076cf137840cabed1497417840248a057a64fdd",
"content_id": "dfc75838a5aff2abff06275660fb984d0d13f152",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 1546,
"license_type": "no_license",
"max_line_length": 96,
"num_lines": 55,
"path": "/Plots/archive/Correct_bokeh_states_county_shapes.py",
"repo_name": "sapphire008/Python",
"src_encoding": "UTF-8",
"text": "# -*- coding: utf-8 -*-\r\n\"\"\"\r\nCreated on Mon Jul 23 13:55:58 2018\r\n\r\n@author: Edward\r\n\"\"\"\r\n\r\nfrom bokeh.sampledata.us_states import data as states\r\nxs = states['AK']['lons']\r\nxs = np.array(xs)\r\nxs[xs>0] = -xs[xs>0]\r\nstates['AK']['lons'] = list(xs)\r\nimport json\r\nfile_dir = 'D:/Edward/Documents/Assignments/Scripts/Python/Plots/resource/US_states.json'\r\nwith open(file_dir,'w') as outfile:\r\n json.dump(states, outfile)\r\n \r\n# %%\r\nimport json\r\nfile_dir = 'D:/Edward/Documents/Assignments/Scripts/Python/Plots/resource/US_states.json'\r\nwith open(file_dir, 'r') as data_file:\r\n states = json.load(data_file)\r\n \r\nxs, ys = states['AK']['lons'],states['AK']['lats']\r\n\r\n# transoformation\r\nX = np.array([xs, ys, list(np.ones(len(xs)))])\r\nA = spm_matrix_2d(P=[-77, 5, 0, 0.25, 0.35])\r\nY = A @ X\r\nstates['AK']['lons'] = list(Y[0,:])\r\nstates['AK']['lats'] = list(Y[1,:])\r\n\r\nxs, ys = states['HI']['lons'],states['HI']['lats']\r\nX = np.array([xs, ys, list(np.ones(len(xs)))])\r\nA = spm_matrix_2d(P=[-38, 17, 0, 0.4, 0.4])\r\nY = A @ X\r\nstates['HI']['lons'] = list(Y[0,:])\r\nstates['HI']['lats'] = list(Y[1,:])\r\n\r\n# plot transformation\r\nfig, ax = plt.subplots(1,1)\r\nfig.set_size_inches(7, 4.5)\r\nfor s in states.keys():\r\n xs, ys = states[s]['lons'],states[s]['lats']\r\n ax.plot(xs, ys)\r\n\r\n \r\n# %% save a copy\r\nfile_dir = 'D:/Edward/Documents/Assignments/Scripts/Python/Plots/resource/US_states_scaled.json'\r\nwith open(file_dir,'w') as outfile:\r\n json.dump(states, outfile)\r\n\r\n\r\n# %%\r\nfrom bokeh.sampledata.us_counties import data as counties\r\n"
},
{
"alpha_fraction": 0.4540441036224365,
"alphanum_fraction": 0.529411792755127,
"avg_line_length": 19.11111068725586,
"blob_id": "18012675444560973b1c212961a0366bf2387709",
"content_id": "274497776a09a108974505f880fb3a2b13fcbd51",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 544,
"license_type": "no_license",
"max_line_length": 61,
"num_lines": 27,
"path": "/generic/Algorithm.py",
"repo_name": "sapphire008/Python",
"src_encoding": "UTF-8",
"text": "#!/usr/bin/env python3\n# -*- coding: utf-8 -*-\n\"\"\"\nCreated on Thu May 9 16:24:44 2019\n\n@author: edward\n\"\"\"\n\ndef array_overlap(nums1, nums2):\n n_start = max([nums1[0], nums2[0]])\n n_end = min([nums1[-1], nums2[-1]])\n \n print(n_start)\n print(n_end)\n \n nums1 = [n for n in nums1 if n >= n_start and n <= n_end]\n nums2 = [n for n in nums2 if n >= n_start and n <= n_end]\n\n return nums1, nums2\n\n\n\nif __name__ == '__main__':\n nums1 = [1,2, 3, 5, 6]\n nums2 = [3, 4, 7]\n \n print(array_overlap(nums1, nums2))\n\n"
},
{
"alpha_fraction": 0.6134852766990662,
"alphanum_fraction": 0.6722064018249512,
"avg_line_length": 32.34782791137695,
"blob_id": "9b61e2401726fa23b1628dc8ebd5e0723c05655b",
"content_id": "a2d7222fd20a74c164d37517d15412475aed4604",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 6318,
"license_type": "no_license",
"max_line_length": 102,
"num_lines": 184,
"path": "/python_tutorials/PythonForDataAnalysis/Chapter_4_numpy_part1.py",
"repo_name": "sapphire008/Python",
"src_encoding": "UTF-8",
"text": "# -*- coding: utf-8 -*-\r\n\"\"\"\r\nCreated on Mon May 05 22:57:16 2014\r\n\r\n@author: Edward\r\n\"\"\"\r\n\r\nimport numpy as np\r\n\r\n# creating ndarray\r\ndata1 = [6, 7.5, 8, 0.1]\r\narr1 = np.array(data1)\r\n\r\narr1.shape# array size\r\narr1.dtype# data type: default float64\r\n\r\ndata2 = [[1,2,3,4],[5,6,7,8]]\r\narr2 = np.array(data2)\r\narr2.shape\r\n\r\narr3 = np.zeros(10) # create 1x10 array of zeros\r\narr4 = np.ones((5,5)) # create 5x5 array of ones\r\narr4 = np.empty((2,3,2)) # create 2x3x2 array of empty\r\n# shape argument needs to be a tuple\r\n# np.empty will return garbage values, insteaad of zeros\r\narr5 = np.arange(15) # create an array from 0 to 14, like range in Python\r\narr6 = np.ones_like(arr5) # take arr5 and build an array of ones the same shape and type as arr5\r\narr7 = np.zeros_like(arr5) #take arr5 and build an array of zeros the same shape and type as arr5\r\narr8 = np.empty_like(arr5) #take arr5 and build an array of empty the same shape and type as arr5\r\n\r\narr9 = np.eye(5)# create 5x5 identity matrix\r\narr10 = np.identity(5) #same as np.eye \r\n\r\n# set and cast data type\r\narr1 = np.array([1,2,3],dtype=np.float64)\r\narr2 = arr1.astype(np.int32) # cast to int32\r\narr3 = np.array(['1.323','-4.23323','5.907','-42.3243'],dtype = np.string_)\r\narr4 = arr3.astype(np.float32) # string to number\r\n\r\n# array element by element operations\r\narr = np.array([[1,2,3],[4,5,6]],dtype=np.float64)\r\narr * arr # elment by element multiplication\r\narr ** 0.5 # element by element raising power to 0.5 (taking square root)\r\n1/arr # element by element inversion: note that the precision will be different\r\n# if carried division on np.int32 or other integer types. The division will be\r\n# essentially a floor division\r\n\r\n# indexing and slicing\r\narr = np.arange(10)\r\narr[5]# get the 6th element, as index starts at 0\r\narr[5:8] # get sub-indices of elements\r\narr[5:9] = 70 # assign index 5 to 9 to 70\r\n\r\narr_slice = arr[5:8] # this creates an aliased view of the arr at slices/indices 5:8\r\narr_slice[1]=12345\r\narr # arr[5] will be changed to 12345\r\n# to trutly copy the data\r\narr_new_slice = arr[5:8].copy()\r\n\r\n# 2D array\r\narr2d = np.array([[1,2,3],[4,5,6],[7,8,9]])\r\narr2d[2] #will get all the elements in row (axis 0) index 2 (3rd row)\r\narr2d[2][1] # get row (axis 0) index 2 element (axis 1) index 1 = 8\r\narr2d[2,1] # same as above\r\narr2d[:2,1:] # along axis 0, get up to (right before) index 2, at axis 1, get from index 1 to the end\r\n\r\n# boolean indexing\r\nnames = np.array(['Bob','Joe','Will','Bob','Will','Joe','Joe'])\r\ndata = randn(7,4)\r\n#suppose each row of data corresponds to names\r\n# get all the values that are corresponding to 'Bob'\r\ndata[names=='Bob']\r\n# get the values starting from the 3rd column\r\ndata[names=='Bob',2:]\r\n# for 'Not' operation, use \"-\"\r\ndata[-(names=='Bob')]\r\ndata[names!='Bob'] # same as above\r\n# use & for AND operation and | for OR operation\r\n\r\n# Fancy indexing: using integer arrays. This will always copy the data\r\narr = np.empty((8,4))\r\nfor i in range(8):\r\n arr[i] = i\r\n\r\narr[[4,3,0,6]] # index from the beginning\r\narr[[-3, -5, -7]] # index from the end\r\n\r\narr = np.arange(32).reshape((8,4))\r\narr[[1,5,7,2],[0,3,1,2]] # pairwise indexing: (1,0), (5,3), ... selected\r\narr[np.ix_([1,5,7,2],[0,3,1,2])] # selecting a square region\r\n\r\n# transposing\r\narr = np.arange(15).reshape((3,5))\r\narr.T\r\n# compute inner product of matrices X'*X\r\narr = np.random.randn(6,3)\r\nnp.dot(arr.T,arr)\r\n# permute for higher dimensions: np.transpose\r\narr = np.arange(16).reshape((2,2,4))\r\narr.transpose((1,0,2))\r\n# swap axis\r\narr.swapaxes(1,2)\r\n\r\n# Example data processing using ndarrays\r\npoints = np.arange(-5,5,0.01) # 1000 eqully spaced points\r\nxs, ys = np.meshgrid(points,points)\r\nimport matplotlib.pyplot as plt\r\nz = np.sqrt(xs**2 + ys**2)\r\nplt.imshow(z, cmap=plt.cm.gray)\r\nplt.colorbar()\r\nplt.title('Image plot of $\\sqrt{x^2 + y^2}$ for a grid of values')\r\n\r\n# x if condition else y alternative\r\nxarr = np.array([1.1,1.2,1.3,1.4,1.5])\r\nyarr = np.array([2.1,2.2,2.3,2.4,2.5])\r\ncond = np.array([True,False,True,True,False])\r\n# suppose take value from xarr if cond is true, otherwise, take from yarr\r\nresult = [(x if c else y) for x, y, c in zip(xarr, yarr, cond)]\r\n#equivalently, alternatively, and more efficiently for large arrays\r\nresult = np.where(cond,xarr,yarr)\r\n# another example: producing an array based on threshold\r\narr = randn(4,4)\r\nnp.where(arr>0,2,-2)# if arr is greater than 0, insert 2, otherwise, insert -2\r\n# consider Excel's if function: if(boolean_testing, value_if_true, value_if_false)\r\n# both np.where and Excel's if function can be nested, e.g.\r\ncond1 = randn(1,100)>0\r\ncond2 = randn(1,100)<0\r\nresult = np.where(cond1 & cond2,0, np.where(cond1,1,np.where(cond2,2,3)))\r\n#alternatively, use boolean expression\r\nresult = 1*cond1 + 2*cond2 + 3* -(cond1 | cond2) #but more difficult to come to first thought\r\n\r\n# statistical:\r\nnp.mean(arr,axis=0)\r\nnp.std(arr,ddof=1)#denominator: N - ddof, default 0\r\n\r\n# boolean arrays\r\nbools = np.arry([True,False,False,True,False])\r\nbools.all\r\nbools.any\r\n\r\n# sorting\r\narr = randn(8)\r\narr.sort() # will return a copy of the sorted array\r\n# unique\r\narr.unique() # applies to both numerics and strings\r\n\r\n# save binary data\r\narr = np.arrange(10)\r\nnp.save('some_array.npy',arr) # save on the disk, if name not appended .npy, will append automatically\r\nnp.load('som_array.npy') # load array\r\nnp.savez('array_archive.npz',a=arr,b=arr) # save as a zip archive of arrays\r\n#when loading an archive, the result will be a dictionary like object\r\n#load text file\r\narr = np.loadtxt('array_ex.txt',delimiter=',')\r\n\r\n# Linear Algebra\r\nx = np.array([[1.,2.,3.],[4.,5.,6.]])\r\ny = np.array([[6.,23.],[-1.,7.],[8.,9.]])\r\nx.dot(y)\r\n#equivalently\r\nnp.dot(x,y)\r\n\r\nfrom numpy.linalg import inv, qr\r\nX = rand(5,5)\r\nmat = X.T.dot(X)\r\ninv(mat) # inverse matrix\r\nq,r = qr(mat) # QR factorization / orthogonaliation\r\n\r\n# random number generation\r\nsamples = np.random.normal(size=(4,4))\r\n\r\n# Example Random Walk\r\nnwalks = 5000\r\nnsteps = 1000\r\ndraws = np.random.randint(0,2,size=(nwalks,nsteps))\r\nsteps = np.where(draws>0,1,-1)\r\nwalks = steps.cumsum(1)\r\n# get max and min of all walks\r\nwalk_max, walk_min = walks.max(), walks.min()\r\n# compute minimum crossing time to 30 or -30\r\nhits30 = (np.abs(walks)>=30).any(1).sum()\r\ncrossing_times = (np.abs(walks)>=30).argmax(1).mean()\r\n\r\n# This concludes today's study"
},
{
"alpha_fraction": 0.5641146302223206,
"alphanum_fraction": 0.5819633603096008,
"avg_line_length": 35.875,
"blob_id": "a554f46d88683b7ea75cd5bb75ec86e2996c92f3",
"content_id": "83ee72ac8ac4f6fe5def9b45c20516054cb5b2f1",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 2129,
"license_type": "no_license",
"max_line_length": 125,
"num_lines": 56,
"path": "/Spikes/monitor_series_resistance_Vclamp.py",
"repo_name": "sapphire008/Python",
"src_encoding": "UTF-8",
"text": "# -*- coding: utf-8 -*-\r\n\"\"\"\r\nCreated on Fri Feb 22 15:51:22 2019\r\n\r\n@author: Edward\r\n\"\"\"\r\nimport os\r\nimport time\r\nfrom spk_util import *\r\nfrom MATLAB import *\r\nfrom importData import *\r\n\r\n\r\ndef monitor_access_resistance_vclamp(ep_file, printResults=True, scalefactor=1.0, window=[995, 1015], fid=None):\r\n zData = NeuroData(dataFile=ep_file, old=True, infoOnly=False)\r\n ts = zData.Protocol.msPerPoint\r\n Is = spk_window(zData.Current['A'], ts, window)\r\n Vs = spk_window(zData.Stimulus['A'], ts, window)\r\n \r\n # Estimate capacitance\r\n R_series, tau, rsquare = spk_vclamp_series_resistance(Is, Vs, ts, window=window, scalefactor=scalefactor, direction='up')\r\n \r\n if printResults:\r\n if fid is not None:\r\n fid.write('{}\\n'.format(ep_file))\r\n fid.write('R_series = {:.4f}\\n'.format(R_series))\r\n fid.write('tau = {:.4f}\\n'.format(tau))\r\n fid.write('rsquare = {:.4f}\\n'.format(rsquare))\r\n fid.write('scale factor = {:.4f}\\n'.format(scalefactor))\r\n \r\n print('{}'.format(ep_file))\r\n print('R_series = {:.4f}'.format(R_series))\r\n print('tau = {:.4f}'.format(tau))\r\n print('rsquare = {:.4f}'.format(rsquare))\r\n print('scale factor = {:.4f}'.format(scalefactor))\r\n \r\n \r\nif __name__ == '__main__':\r\n Path = 'D:/Data/Traces/test'\r\n current_files, _ = SearchFiles(Path, '*.dat', 'D') # get current list of files\r\n pause_timer = 5 # 5 seconds\r\n scalefactor = 1. # current scale factor\r\n if not isempty(current_files):\r\n monitor_access_resistance_vclamp(current_files[-1], scalefactor=scalefactor)\r\n # Start the log\r\n fid = open(os.path.join(Path, 'R_series_monitor.log'), 'a')\r\n while True:\r\n new_files, _ = SearchFiles(Path, '*.dat', 'D')\r\n new_0 = np.setdiff1d(new_files, current_files)\r\n if not isempty(new_0):\r\n current_files = new_files\r\n # call function\r\n monitor_access_resistance_vclamp(current_files[-1], scalefactor=scalefactor, fid=fid)\r\n time.sleep(pause_timer)\r\n \r\n fid.close()\r\n "
},
{
"alpha_fraction": 0.3793273866176605,
"alphanum_fraction": 0.4045499563217163,
"avg_line_length": 38.47058868408203,
"blob_id": "377084a1a8cb4fcd5232df6ad493b194bdeef63d",
"content_id": "9f05eaba54290340304d2c5573b7a66f121cbe73",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 2022,
"license_type": "no_license",
"max_line_length": 113,
"num_lines": 51,
"path": "/Spikes/spikedetekt2/spikedetekt2/processing/tests/test_threshold.py",
"repo_name": "sapphire008/Python",
"src_encoding": "UTF-8",
"text": "\"\"\"Filtering tests.\"\"\"\n\n# -----------------------------------------------------------------------------\n# Imports\n# -----------------------------------------------------------------------------\nimport numpy as np\nfrom nose.tools import assert_almost_equal as almeq\n\nfrom spikedetekt2 import (read_raw, get_threshold, bandpass_filter,)\n\n\n# -----------------------------------------------------------------------------\n# Tests\n# -----------------------------------------------------------------------------\ndef test_get_threshold_1():\n duration = 10.\n sample_rate = 20000.\n filter_low = 10.\n filter_high = 10000.\n nchannels = 5\n nsamples = int(duration * sample_rate)\n nexcerpts = 3\n excerpt_size = 10000\n \n X = np.random.randn(nsamples, nchannels)\n raw_data = read_raw(X)\n \n filter = bandpass_filter(filter_butter_order=3,\n sample_rate=sample_rate,\n filter_low=filter_low,\n filter_high=filter_high,\n )\n \n threshold1 = get_threshold(raw_data, filter=filter, \n nexcerpts=nexcerpts,\n excerpt_size=excerpt_size,\n threshold_std_factor=(2., 4.))\n \n threshold2 = get_threshold(raw_data, filter=filter, \n nexcerpts=nexcerpts,\n excerpt_size=excerpt_size,\n threshold_std_factor=(2., 4.),\n use_single_threshold=False)\n\n threshold3 = get_threshold(raw_data, filter=filter, \n nexcerpts=nexcerpts,\n excerpt_size=excerpt_size,\n threshold_std_factor=np.array([[2.5, 2., 2., 2.3, 1.2], [4.5, 3.2, 1., 2.3, 3.4]]),\n use_single_threshold=False)\n\n # assert np.abs(np.array(threshold) - 4.5) < .5\n \n "
},
{
"alpha_fraction": 0.6483705043792725,
"alphanum_fraction": 0.6795883178710938,
"avg_line_length": 27.363636016845703,
"blob_id": "c3faab36bb1cce8d68ca352fdc675f709eab30cd",
"content_id": "2bb96d2d22610bfe9f1c9f6f106683d7e68a5b63",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 2915,
"license_type": "no_license",
"max_line_length": 73,
"num_lines": 99,
"path": "/tutorial/pyplot_example.py",
"repo_name": "sapphire008/Python",
"src_encoding": "UTF-8",
"text": "# -*- coding: utf-8 -*-\r\n\"\"\"\r\nCreated on Mon Aug 03 01:41:38 2015\r\n\r\n@author: Edward\r\n\"\"\"\r\nimport numpy as np\r\nimport matplotlib.pyplot as plt\r\n\r\n# line plot\r\n# initialize some data\r\nX = np.arange(0, 10, 0.1)\r\nY1 = np.sin(X)\r\nY2 = np.cos(X)\r\nfig = plt.figure(5, figsize=(8,3))\r\nplt.plot(X,Y1, label=\"Sin\")\r\nplt.plot(X,Y2, label=\"Cos\")\r\nplt.xlabel(\"Time\")\r\nplt.ylabel(\"Signal\")\r\nplt.legend()\r\n\r\n# Scatter plot\r\nfrom sklearn import datasets\r\n# Load iris dataset\r\niris = datasets.load_iris()\r\n\r\nprint(iris.feature_names)\r\n# initialize a figure\r\nfig = plt.figure(1, figsize=(4,3))\r\n# Scatter plot\r\nplt.scatter(iris.data[:,0], iris.data[:,1], c=iris.target)\r\n# Set x/y labels\r\nplt.xlabel(iris.feature_names[0])\r\nplt.ylabel(iris.feature_names[1])\r\n\r\n\r\n# 3D scatter plot\r\nfrom mpl_toolkits.mplot3d import Axes3D\r\n# initialize a figure\r\nfig = plt.figure(2, figsize=(8, 6))\r\nax = Axes3D(fig, elev=-150, azim=110)\r\nax.scatter(iris.data[:,0], iris.data[:,1], iris.data[:,2], c=iris.target)\r\nax.set_xlabel(iris.feature_names[0])\r\nax.set_ylabel(iris.feature_names[1])\r\nax.set_zlabel(iris.feature_names[2])\r\nplt.show()\r\n\r\n# Histogram\r\nfig = plt.figure(3, figsize=(4,3))\r\nplt.hist(iris.data[:,2], bins=7, alpha=0.4, hatch=\"/\")\r\nplt.xlabel(iris.feature_names[2])\r\nplt.ylabel('Count')\r\n# grab figure axs for manipulations\r\nax = fig.axes[0]\r\n# Set styles of axes\r\nax.tick_params(axis='both',direction='out')\r\nax.spines['left'].set_visible(True)\r\nax.spines['right'].set_visible(False)\r\nax.spines['top'].set_visible(False)\r\nax.spines['bottom'].set_visible(True)\r\nax.xaxis.set_ticks_position('bottom')\r\nax.yaxis.set_ticks_position('left')\r\n\r\n# Barplot\r\nprint(iris.target_names)\r\n# First, count each target\r\ntarget, count = np.unique(iris.target, return_counts=True)\r\ntarget = target+0.25\r\nfig = plt.figure(4, figsize=(4,3))\r\nplt.bar(target, count, width=0.60, yerr=[5, 8, 10])\r\nplt.xticks(target+0.30, iris.target_names)\r\nax = fig.axes[0]\r\nax.tick_params(axis='both',direction='out')\r\nax.spines['left'].set_visible(True)\r\nax.spines['right'].set_visible(False)\r\nax.spines['top'].set_visible(False)\r\nax.spines['bottom'].set_visible(True)\r\nax.xaxis.set_ticks_position('none')\r\nax.yaxis.set_ticks_position('left')\r\n\r\n\r\n\r\n########### Change font ####################\r\n# initialize a figure\r\nfig = plt.figure(1, figsize=(4,3))\r\n# Scatter plot\r\nplt.scatter(iris.data[:,0], iris.data[:,1], c=iris.target)\r\n# Set x/y labels\r\nplt.xlabel(iris.feature_names[0], family='Arial', fontsize=12)\r\nplt.ylabel(iris.feature_names[1], family='Arial', fontsize=12)\r\n# Set font\r\nimport matplotlib.font_manager as fm\r\nax = fig.axes[0]\r\nfontprop = fm.FontProperties(family='Arial', style=\"normal\", size=12)\r\nax.xaxis.label.set_fontproperties(fontprop)\r\nax.yaxis.label.set_fontproperties(fontprop)\r\n# Set font of tick labels\r\n[a.set_fontproperties(fontprop) for a in ax.get_xticklabels()]\r\n[a.set_fontproperties(fontprop) for a in ax.get_yticklabels()]\r\n\r\n\r\n\r\n\r\n"
},
{
"alpha_fraction": 0.5412293672561646,
"alphanum_fraction": 0.5577211380004883,
"avg_line_length": 28,
"blob_id": "ff7b619405d7626b5de2b8959953abbc34ab7f75",
"content_id": "d8169f06edbe18a73c3b317d33a5507dd4a08672",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 667,
"license_type": "no_license",
"max_line_length": 61,
"num_lines": 23,
"path": "/Spikes/spikedetekt2/setup.py",
"repo_name": "sapphire008/Python",
"src_encoding": "UTF-8",
"text": "from setuptools import setup\n\nsetup(\n name='spikedetekt2',\n version='0.3.0',\n author='Klusta-Team',\n author_email='rossant@github',\n packages=[\n 'spikedetekt2',\n 'spikedetekt2.processing',\n 'spikedetekt2.processing.tests',\n 'spikedetekt2.core',\n 'spikedetekt2.core.tests',\n ],\n entry_points={\n 'console_scripts':\n ['spikedetekt = spikedetekt2.core.script:main',\n ]},\n url='http://klusta-team.github.io',\n license='LICENSE.txt',\n description='SpikeDetekt2, part of the KlustaSuite',\n # long_description=open('README.md').read(),\n)\n"
},
{
"alpha_fraction": 0.5745762586593628,
"alphanum_fraction": 0.6127118468284607,
"avg_line_length": 29.891891479492188,
"blob_id": "1fd5f6bf29699b36c5af100607c8e0154403c70a",
"content_id": "13fe77b00794a984ef3cc0fda88d481345a6e332",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 1180,
"license_type": "no_license",
"max_line_length": 116,
"num_lines": 37,
"path": "/generic/monkey.py",
"repo_name": "sapphire008/Python",
"src_encoding": "UTF-8",
"text": "# -*- coding: utf-8 -*-\r\n\"\"\"\r\nCreated on Tue May 8 15:08:22 2018\r\n\r\n@author: Edward\r\n\"\"\"\r\n\r\nfrom ImportData import load_trace\r\nfrom spk_util import *\r\n\r\ndef get_SFA_bins(Cell=\"NeocortexChRNBM K.16Oct17\", Episode=\"S1.E1\", num_bins=40):\r\n zData = load_trace([Cell, Episode])\r\n ts = zData.Protocol.msPerPoint\r\n stim = spk_get_stim(zData.Stimulus['A'], ts)\r\n _, spike_times, _ = spk_count(spk_window(zData.Voltage['A'], ts, stim), ts)\r\n \r\n time_bins = np.linspace(0, stim[1]-stim[0], num_bins+1)\r\n time_bins = np.c_[time_bins[:-1], time_bins[1:]]\r\n time_bins = np.c_[time_bins.mean(axis=1), time_bins]\r\n \r\n spike_bins = np.zeros(time_bins.shape[0])\r\n \r\n for n, t in enumerate(time_bins):\r\n spike_bins[n] = np.logical_and(spike_times >= time_bins[n, 1], spike_times < time_bins[n, 2]).sum(dtype=int)\r\n \r\n spike_bins = np.cumsum(spike_bins, dtype=int)\r\n \r\n return spike_bins\r\n\r\n\r\n\r\nspike_bins_1 = get_SFA_bins(Episode=\"S1.E1\")\r\nspike_bins_2 = get_SFA_bins(Episode=\"S1.E6\")\r\nspike_bins_3 = get_SFA_bins(Episode=\"S1.E10\")\r\nsb = np.c_[spike_bins_1, spike_bins_2, spike_bins_3]\r\nmean_sb = sb.mean(axis=1)\r\nserr_sb = serr(sb, axis=1)\r\n"
},
{
"alpha_fraction": 0.5485442876815796,
"alphanum_fraction": 0.5530425906181335,
"avg_line_length": 41.70053482055664,
"blob_id": "16b218e57cb2b241bc76522e20b1818befd24d90",
"content_id": "51d73334381e90cec424d8d0be17168a6d674889",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 8003,
"license_type": "no_license",
"max_line_length": 87,
"num_lines": 187,
"path": "/Spikes/spikedetekt2/spikedetekt2/processing/waveform.py",
"repo_name": "sapphire008/Python",
"src_encoding": "UTF-8",
"text": "\"\"\"Alignment routines.\"\"\"\n\n# -----------------------------------------------------------------------------\n# Imports\n# -----------------------------------------------------------------------------\nimport numpy as np\nfrom scipy.interpolate import interp1d\n\n# -----------------------------------------------------------------------------\n# Utility functions\n# -----------------------------------------------------------------------------\nclass InterpolationError(Exception):\n pass\n\ndef get_padded(Arr, Start, End):\n '''\n Returns Arr[Start:End] filling in with zeros outside array bounds\n \n Assumes that EITHER Start<0 OR End>len(Arr) but not both (raises error).\n '''\n if Start < 0 and End >= Arr.shape[0]:\n raise IndexError(\"Can have Start<0 OR End>len(Arr) but not both.\\n \\\n This error has probably occured because your Thresholds \\n \\\n are aritificially low due to early artifacts\\n \\\n Increase the parameter CHUNKS_FOR_THRESH \")\n if Start < 0:\n StartZeros = np.zeros((-Start, Arr.shape[1]), dtype=Arr.dtype)\n return np.vstack((StartZeros, Arr[:End]))\n elif End > Arr.shape[0]:\n EndZeros = np.zeros((End-Arr.shape[0], Arr.shape[1]), dtype=Arr.dtype)\n return np.vstack((Arr[Start:], EndZeros))\n else:\n return Arr[Start:End]\n \n \n# -----------------------------------------------------------------------------\n# Waveform class\n# -----------------------------------------------------------------------------\nclass Waveform(object):\n def __init__(self, fil=None, raw=None, masks=None, \n s_min=None, # relative to the start of the chunk\n s_fracpeak=None, # relative to the start of the chunk\n s_start=None, # start of the chunk, absolute,\n recording=0,\n channel_group=None):\n self.fil = fil\n self.raw = raw\n self.masks = masks\n self.s_min = s_min # start of the waveform, relative to the start\n # of the chunk\n self.s_start = s_start # start of the chunk, absolute (wrt the exp)\n # peak fractional time of the waveform, absolute (relative to the exp)\n self.sf_offset = s_fracpeak + s_start\n self.s_offset = int(self.sf_offset)\n self.s_frac_part = self.sf_offset - self.s_offset\n self.channel_group = channel_group\n self.recording = recording\n \n def __cmp__(self, other):\n return self.sf_offset - other.sf_offset\n \n def __repr__(self):\n return '<Waveform on channel group {chgrp} at sample {smp}>'.format(\n chgrp=self.channel_group,\n smp=self.sf_offset\n )\n\n\n# -----------------------------------------------------------------------------\n# Waveform extraction\n# -----------------------------------------------------------------------------\ndef extract_waveform(component, chunk_fil=None, chunk_raw=None,\n chunk_extract=None, # =chunk_fil or its abs()\n threshold_strong=None, threshold_weak=None, \n probe=None, **prm):\n s_start = component.s_start # Absolute start of the chunk.\n keep_start = component.keep_start # Absolute start of the kept chunk.\n keep_end = component.keep_end # Absolute end of the kept chunk.\n recording = component.recording # Recording index of the current raw data \n # section\n \n s_before = prm['extract_s_before']\n s_after = prm['extract_s_after']\n \n \n component_items = component.items\n assert len(component_items) > 0\n \n # Get samples and channels in the component.\n if not isinstance(component_items, np.ndarray):\n component_items = np.array(component_items)\n \n # The samples here are relative to the start of the chunk.\n comp_s = component_items[:,0] # shape: (component_size,)\n comp_ch = component_items[:,1] # shape: (component_size,)\n \n # Find the channel_group of the spike.\n # Make sure the channel is in the probe, otherwise pass the waveform.\n if component_items[0][1] not in probe.channel_to_group:\n return None\n channel_group = probe.channel_to_group[component_items[0][1]]\n # List of channels in the current channel group.\n channels = probe.channel_groups[channel_group].channels\n\n # Total number of channels across all channel groups.\n # chunk_extract = chunk_extract[:,probe.channels]\n nsamples, nchannels = chunk_extract.shape\n # nchannels = len(channels)\n # assert nchannels == probe.nchannels\n \n # Get binary mask.\n masks_bin = np.zeros(nchannels, dtype=np.bool) # shape: (nchannels,)\n masks_bin[sorted(set(comp_ch))] = 1\n \n # Get the temporal window around the waveform.\n # These values are relative to the start of the chunk.\n s_min, s_max = np.amin(comp_s) - 3, np.amax(comp_s) + 4 \n s_min = max(s_min, 0)\n s_max = min(s_max, nsamples)\n s_offset = s_start + s_min # absolute offset of the waveform (wrt the exp)\n \n # Extract the waveform values from the data.\n # comp shape: (some_length, nchannels)\n # contains the filtered chunk on weak threshold crossings only\n # small temporal window around the waveform\n comp = np.zeros((s_max - s_min, nchannels), dtype=chunk_extract.dtype)\n comp[comp_s - s_min, comp_ch] = chunk_extract[comp_s, comp_ch]\n # the sample where the peak is reached, on each channel, relative to\n # the beginning\n \n # Find the peaks (relative to the start of the chunk).\n peaks = np.argmax(comp, axis=0) + s_min # shape: (nchannels,)\n # peak values on each channel\n # shape: (nchannels,)\n peaks_values = chunk_extract[peaks, np.arange(0, nchannels)] * masks_bin\n \n # Compute the float masks.\n masks_float = np.clip( # shape: (nchannels,)\n (peaks_values - threshold_weak) / (threshold_strong - threshold_weak), \n 0, 1)\n masks_float = masks_float[channels] # keep shank channels\n \n # Compute the fractional peak.\n power = prm.get('weight_power', 1.)\n comp_normalized = np.clip(\n (comp - threshold_weak) / (threshold_strong - threshold_weak),\n 0, 1)\n comp_power = np.power(comp_normalized, power)\n u = np.arange(s_max - s_min)[:,np.newaxis]\n # Spike frac time relative to the start of the chunk.\n s_fracpeak = np.sum(comp_power * u) / np.sum(comp_power) + s_min\n \n # Realign spike with respect to s_fracpeak.\n s_peak = int(s_fracpeak)\n # Get block of given size around peaksample.\n wave = get_padded(chunk_fil,\n s_peak - s_before - 1,\n s_peak + s_after + 2)\n wave = wave[:,channels] # keep shank channels\n \n # Perform interpolation around the fractional peak.\n old_s = np.arange(s_peak - s_before - 1, s_peak + s_after + 2)\n new_s = np.arange(s_peak - s_before, s_peak + s_after) + (s_fracpeak - s_peak)\n try:\n f = interp1d(old_s, wave, bounds_error=True, kind='cubic', axis=0)\n except ValueError: \n raise InterpolationError(\"Interpolation error at time {0:d}\".format(\n s_offset))\n wave_aligned = f(new_s)\n \n # Get unfiltered spike.\n wave_raw = get_padded(chunk_raw,\n s_peak - s_before,\n s_peak + s_after)\n wave_raw = wave_raw[:,channels] # keep shank channels\n \n # Create the Waveform instance.\n waveform = Waveform(fil=wave_aligned, raw=wave_raw, masks=masks_float, \n s_min=s_min, s_start=s_start, s_fracpeak=s_fracpeak, \n channel_group=channel_group,\n recording=recording)\n \n # Only keep the waveforms that are within the chunk window.\n if keep_start <= waveform.sf_offset < keep_end:\n return waveform\n else:\n return None\n \n \n"
},
{
"alpha_fraction": 0.6111111044883728,
"alphanum_fraction": 0.640625,
"avg_line_length": 43.30769348144531,
"blob_id": "e57cbae0bbc53a4d5bd46b459922778c33f9b484",
"content_id": "e7e5fdfdc7410b1617dd0c6c19d80786a01df285",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 576,
"license_type": "no_license",
"max_line_length": 81,
"num_lines": 13,
"path": "/Plots/df_display.py",
"repo_name": "sapphire008/Python",
"src_encoding": "UTF-8",
"text": "from IPython.display import display, HTML\nfrom IPython.display import display_html\nfrom itertools import chain,cycle\ndef display_side_by_side(*args,titles=cycle([''])):\n html_str=''\n for df,title in zip(args, chain(titles,cycle(['</br>'])) ):\n html_str+='<th style=\"text-align:center\"><td style=\"vertical-align:top\">'\n html_str+=f'<h2>{title}</h2>'\n html_str+=df.to_html().replace('table','table style=\"display:inline\"')\n html_str+='</td></th>'\n display_html(html_str,raw=True)\n\ndisplay_side_by_side(c[:25], c[25:50], c[50:75], c[75:100])\n"
},
{
"alpha_fraction": 0.6115348935127258,
"alphanum_fraction": 0.6215813755989075,
"avg_line_length": 37.233577728271484,
"blob_id": "b51c7dab7bbe0215896c8beef2c890aae1454be0",
"content_id": "ce923c7e0408d4c3e7f4b665eeab27c8db6f77b4",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 5375,
"license_type": "no_license",
"max_line_length": 136,
"num_lines": 137,
"path": "/Plots/svg2eps.py",
"repo_name": "sapphire008/Python",
"src_encoding": "UTF-8",
"text": "# -*- coding: utf-8 -*-\r\n\"\"\"\r\nCreated on Thu Nov 3 18:44:36 2016\r\n\r\nCall Adobe Illustrator to convert .svg to .eps\r\n\r\n@author: Edward\r\n\"\"\"\r\nimport os\r\nimport signal\r\nimport subprocess\r\nimport time\r\nfrom pdb import set_trace\r\n\r\njsx_file_str_AI_CS6 = \"\"\"\r\nfunction exportFigures_AI_CS6(sourceFile, targetFile, exportType, ExportOpts) {\r\n if (sourceFile){ // if not an empty string\r\n var fileRef = new File(sourceFile)\r\n var sourceDoc = app.open(fileRef); // returns the document object\r\n } else { // for empty string, use current active document\r\n sourceDoc = app.activeDocument();\r\n }\r\n var newFile = new File(targetFile) // newly saved file\r\n\r\n switch(exportType){\r\n case 'png':\r\n if (ExportOpts == null) {\r\n var ExportOpts = new ExportOptionsPNG24()\r\n ExportOpts.antiAliasing = true;\r\n ExportOpts.transparency = true;\r\n ExportOpts.saveAsHTML = true;\r\n }\r\n // Export as PNG\r\n sourceDoc.exportFile(newFile, ExportType.PNG24, ExportOpts);\r\n case 'tiff':\r\n if (ExportOpts == null) {\r\n var ExportOpts = new ExportOptionsTIFF();\r\n ExportOpts.resolution = 600;\r\n ExportOpts.byteOrder = TIFFByteOrder.IBMPC;\r\n ExportOpts.IZWCompression = false;\r\n ExportOpts.antiAliasing = true\r\n }\r\n sourceDoc.exportFile(newFile, ExportType.TIFF, ExportOpts);\r\n case 'svg':\r\n if (ExportOpts == null) {\r\n var ExportOpts = new ExportOptionsSVG();\r\n ExportOpts.embedRasterImages = true;\r\n ExportOpts.embedAllFonts = true;\r\n ExportOpts.fontSubsetting = SVGFontSubsetting.GLYPHSUSED;\r\n }\r\n // Export as SVG\r\n sourceDoc.exportFile(newFile, ExportType.SVG, ExportOpts);\r\n case 'eps':\r\n if (ExportOpts == null) {\r\n var ExportOpts = new EPSSaveOptions();\r\n ExportOpts.cmykPostScript = true;\r\n ExportOpts.embedAllFonts = true;\r\n }\r\n // Export as EPS\r\n sourceDoc.saveAs(newFile, ExportOpts);\r\n }\r\n // Close the file after saving. Simply save another copy, do not overwrite\r\n sourceDoc.close(SaveOptions.DONOTSAVECHANGES);\r\n}\r\n\r\n// Use the function to convert the files\r\nexportFigures_AI_CS6(sourceFile=\"{format_source_file}\", targetFile=\"{format_target_file}\", exportType=\"eps\", ExportOpts=null)\r\n// exportFigures_AI_CS6(sourceFile=arguments[0], targetFile=arguments[1], exportType=arguments[2])\r\n\"\"\"\r\n\r\n\r\ndef svg2eps_ai(source_file, target_file, \\\r\n illustrator_path=\"D:/Edward/Software/AdobeIllustratorCS6(64bit)Portable/Support Files/Contents/Windows/Illustrator.exe\",\\\r\n jsx_file_str = jsx_file_str_AI_CS6, DEBUG=False):\r\n \"\"\"Use Adobe Illustrator to convert svg to eps\"\"\"\r\n # Change the strings\r\n jsx_file_str = jsx_file_str.replace('{format_source_file}', source_file)\r\n jsx_file_str = jsx_file_str.replace('{format_target_file}', target_file).replace('\\\\','/')\r\n tmp_f = os.path.abspath(os.path.join(os.path.dirname(target_file), \"tmp.jsx\"))\r\n f = open(tmp_f, 'w')\r\n f.write(jsx_file_str)\r\n f.close()\r\n\r\n # Remove previous target file if already existed\r\n if os.path.isfile(target_file):\r\n os.remove(target_file)\r\n\r\n # subprocess.check_call([illustrator_path, '-run', tmp_f])\r\n cmd = \" \".join(['\"'+illustrator_path+'\"', '-run', '\"'+tmp_f+'\"'])\r\n pro = subprocess.Popen(cmd, stdout=subprocess.PIPE)\r\n # print(pro.stdout)\r\n # continuously check if new files are updated\r\n time.sleep(5.0)\r\n sleep_iter = 5.0\r\n max_sleep_iter = 40\r\n while not os.path.isfile(target_file):\r\n time.sleep(1.0)\r\n sleep_iter = sleep_iter + 1.0\r\n if sleep_iter > max_sleep_iter:\r\n break\r\n\r\n # pro.terminate()\r\n #os.kill(os.getpid(), signal.SIGTERM) # Send the signal to all the process groups\r\n pro.kill()\r\n os.remove(tmp_f)\r\n\r\ndef svg2eps_inkscape(source_file, target_file, \\\r\n inkscape_path='\"D:\\\\Edward\\\\Software\\\\inkscape-0.91-1-win64\\\\inkscape.exe\"'):\r\n \"\"\"Use inkscape to convert svg to eps\"\"\"\r\n # cmd = \"inkscape in.svg -E out.eps --export-ignore-filters --export-ps-level=3\"\r\n cmd = inkscape_path+\" \"+source_file+\" --export-eps=\"+target_file +\" --export-ignore-filters --export-ps-level=3\"\r\n print(cmd) # Problem: text was not kept as text, but converted into paths\r\n pro = subprocess.Popen(cmd, stdout=subprocess.PIPE)\r\n #subprocess.check_call([inkscape_path, source_file, '-E', target_file])\r\n print(pro.stdout)\r\n \r\n#def svg2eps_cloudconvert(source_file, target_file):\r\n# import cloudconvert\r\n# api = cloudconvert.Api('5PGyLT7eAn0yLbnBU3G-7j1JLFWTfcnFUk6x7k_lhuwzioGwqO7bVQ-lJNunsDkrr9fL1JDdjdVog6iDZ31yIw')\r\n# process = api.convert({\"input\": \"upload\",\r\n# \"file\": open('R:/temp.svg', 'rb'),\r\n# \"inputformat\": \"svg\",\r\n# \"outputformat\": \"eps\",\r\n# })\r\n# process.wait()\r\n# process.download()\r\n\r\n\r\n\r\n\r\nif __name__ == '__main__':\r\n source_file = '\"R:\\\\temp.svg\"'\r\n target_file = '\"R:\\\\temp.eps\"'\r\n illustrator_path=\"D:/Edward/Software/Adobe Illustrator CS6/Support Files/Contents/Windows/Illustrator.exe\"\r\n javascript_path=\"D:\\\\Edward\\\\Documents\\\\Assignments\\\\Scripts\\\\Python\\\\PySynapse\\\\util\\\\ExportDocsAdobeIllustrator.jsx\"\r\n # svg2eps_ai(source_file, target_file)\r\n svg2eps_inkscape(source_file, target_file)\r\n"
},
{
"alpha_fraction": 0.5430342555046082,
"alphanum_fraction": 0.5523313283920288,
"avg_line_length": 32.01860427856445,
"blob_id": "50586f113abee49c00e7c6b6b32d11d7bce4f4c7",
"content_id": "f57223ed59b65f381cfd5d11824e2c30beba9d1c",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 7099,
"license_type": "no_license",
"max_line_length": 120,
"num_lines": 215,
"path": "/generic/SparseMatrixFactorizationALS.py",
"repo_name": "sapphire008/Python",
"src_encoding": "UTF-8",
"text": "import numpy as np\nimport scipy.sparse\nfrom scipy.sparse import csc_matrix, coo_matrix, dok_matrix, load_npz, save_npz\nfrom tqdm import tqdm\nfrom pdb import set_trace\n\ndef multiply_U_W(U, W, R):\n \"\"\"Compute product of U.T and W, \n but only at the position where R is available\"\"\"\n iU, iW = R.tocoo().row, R.tocoo().col #np.nonzero(R)\n #values = np.sum(U[iU, :] * W[iW, :], axis=1)\n values = np.einsum('ij,ij->i', U[iU, :], W[iW, :])\n result = coo_matrix((values, (iU, iW))).tocsc()\n return result\n\ndef R_plus_cv(R, cv):\n \"\"\"Sparse matrix plus a column vector\"\"\"\n R = R.tocsc()\n R.data += np.take(cv, R.indices)\n return R\n\ndef R_plus_rv(R, rv):\n \"\"\"Sparse matrix plus a row vector\"\"\"\n R = R.tocsr()\n R.data += np.take(rv, R.indices)\n return R\n\ndef R_op_cv(R, cv, op='plus'):\n \"\"\"Sparse matrix (+-*/) a column vector\"\"\"\n R = R.tocsc()\n if op == 'plus':\n R.data += np.take(cv, R.indices)\n elif op == 'minus':\n R.data -= np.take(cv, R.indices)\n elif op == 'multiply':\n R.data *= np.take(cv, R.indices)\n elif op == 'divide':\n R.data /= np.take(cv, R.indices)\n else:\n raise(ValueError(f'Unknown operator {op}'))\n return R\n\ndef R_op_rv(R, rv, op='plus'):\n \"\"\"Sparse matrix (+-*/) a row vector\"\"\"\n R = R.tocsr() \n if op == 'plus':\n R.data += np.take(rv, R.indices)\n elif op == 'minus':\n R.data -= np.take(rv, R.indices)\n elif op == 'multiply':\n R.data *= np.take(rv, R.indices)\n elif op == 'divide':\n R.data /= np.take(rv, R.indices)\n else:\n raise(ValueError(f'Unknown operator {op}'))\n return R\n\ndef sparse_normalize(R, axis=1):\n \"\"\"Normalize a sparse matrix along an axis\"\"\"\n if axis==1:\n R = R.tocsc()\n else:\n R = R.tocsr()\n norm = R.copy()\n norm.data = norm.data**2\n norm = np.sqrt(norm.sum(axis=axis).A.flatten())\n R.data /= np.take(norm, R.indices)\n\n return R\n\ndef sparse_mean(R, axis=0):\n \"\"\"Compute mean of a sparse matrix along a certain axis\"\"\"\n R = R.tocsc()\n # Compute mean\n R_mean = np.array(R.sum(axis=axis) / np.expand_dims(R.getnnz(axis=axis), axis=axis)) # dense, kept dim\n return R_mean\n\ndef sparse_var(R, axis=0):\n \"\"\"Compute variance of a sparse matrix along a certain axis\"\"\"\n R = R.tocsc()\n # Compute mean\n R_mean = sparse_mean(R, axis=axis)\n # Compute variance\n R_var = np.array(R.multiply(R).sum(axis=axis) / np.expand_dims(R.getnnz(axis=axis), axis=axis)) - R_mean**2\n return R_var\n\ndef sparse_std(R, axis=0):\n \"\"\"Compute standard deviation of a sparse matrix along a certain axis\"\"\"\n # Compute variance\n R_var = sparse_var(R, axis=axis)\n # Compute standard deviation\n R_std = np.sqrt(R_var)\n return R_std\n\ndef sparse_stats(R, axis=0):\n R = R.tocsc()\n R_mean = np.array(R.sum(axis=axis) / np.expand_dims(R.getnnz(axis=axis), axis=axis)) # dense, kept dim\n R_var = np.array(R.multiply(R).sum(axis=axis) / np.expand_dims(R.getnnz(axis=axis), axis=axis)) - R_mean**2\n R_std = np.sqrt(R_var)\n return R_mean, R_var, R_std\n\ndef sparse_symmetrify(R):\n R = R + R.T.multiply(R.T > R) - R.multiply(R.T > R)\n return R\n \ndef regularized_matrix_factorization(R, K=25, l2_reg=0.001, maxiter=100, random_state=42):\n \"\"\"\n Parameters:\n --------\n * R : rating sparse matrix of size N x M\n * K: number of features to use\n * l2_reg: L2 regularization size\n * maxiter : max number of iterations\n * random_state: for testing purposes only\n Returns:\n --------\n U : N x K user matrix\n W : M x K item matrix\n b : length N vector of user bias\n c : length M vector of item bias\n mu: global mean of the user rating matrix\n \"\"\"\n N, M = R.shape\n R = R.tocsc()\n # Calculate global mean mu\n mu = R.mean()\n \n # Initialize the user and item matrix\n rs = np.random.RandomState(random_state)\n U = rs.rand(N, K)\n W = rs.rand(M, K)\n U_W_prod = multiply_U_W(U, W, R) # sparse N x M\n \n # Initialize the bias terms\n b = rs.rand(N)\n c = rs.rand(M)\n \n # Counting cardinalities\n card_psi = R.getnnz(axis=1)\n card_omega = R.getnnz(axis=0)\n total_card = R.getnnz()\n \n # Initialize loss\n J = np.zeros(maxiter)\n \n # Iterate\n for epoch in tqdm(range(maxiter)):\n #print('epoch: ', epoch)\n # Compute prediction\n R_hat = U_W_prod.copy()\n R_hat.data += mu # add mu\n R_hat = R_plus_cv(R_hat, b) # add b, of length N\n R_hat = R_plus_rv(R_hat, c).tocsc() # add c, of length M\n \n # Compute regularized loss\n J[epoch] = ((R.data - R_hat.data)**2).sum() + l2_reg*(np.sum(U**2) + np.sum(W**2) + np.sum(b**2) + np.sum(c**2))\n #J[epoch] = np.sum((R.toarray() - R_hat.toarray())**2)\n \n J[epoch] = J[epoch] / total_card # mean squared error\n \n # Update the parameters\n R_b_c_mu = R.copy()\n R_b_c_mu.data -= mu\n R_b_c_mu = R_plus_cv(R_b_c_mu, -b)\n R_b_c_mu = R_plus_rv(R_b_c_mu, -c).tocsc()\n \n U = np.linalg.solve(W.T @ W + l2_reg * np.eye(K)[np.newaxis, :, :], \n R_b_c_mu @ W) # NxK = solve(1xKxK, NxK)\n W = np.linalg.solve(U.T @ U + l2_reg * np.eye(K)[np.newaxis, :, :], \n R_b_c_mu.T @ U) # MxK = solve(1xKxK, MxK) \n U_W_prod = multiply_U_W(U, W, R)\n \n R_u_mu = R - U_W_prod\n R_u_mu.data -= mu\n R_u_c_mu = R_u_mu.copy()\n R_u_c_mu = R_plus_rv(R_u_c_mu, -c).tocsc()\n R_u_c_mu = np.asarray(R_u_c_mu.sum(axis=1)).flatten()\n R_u_b_mu = R_u_mu.copy()\n R_u_b_mu = R_plus_cv(R_u_b_mu, -b)\n R_u_b_mu = np.asarray(R_u_b_mu.sum(axis=0)).flatten()\n \n b = 1 / (card_psi + l2_reg) * R_u_c_mu # length N\n c = 1 / (card_omega + l2_reg) * R_u_b_mu # legnth M\n \n return U, W, b, c, mu, J, R_hat\n\nif __name__ == '__main__':\n tmp_A = np.array([[1,2,0], [0, 1, 3], [3, 2, 0], [1, 0, 5]], dtype=float)\n print(tmp_A)\n nan_A = tmp_A.copy()\n nan_A[nan_A<1E-6] = np.nan\n print(nan_A)\n A = scipy.sparse.csc_matrix(tmp_A)\n row_mean, row_var, row_std = sparse_stats(A, axis=0)\n print('sparse mean', row_mean)\n print('sparse var', row_var)\n print('sparse std', row_std)\n A_new = R_plus_rv(A, -row_mean.ravel())\n print('remove mean', A_new.toarray())\n A_new = R_op_rv(A_new, row_std.ravel(), op='divide')\n print('divide std', A_new.toarray())\n\n new_row_mean, _, new_row_std = sparse_stats(A_new, axis=0)\n print('sparse new mean:', new_row_mean)\n print('sparse new std:', new_row_std)\n\n print('numpy mean:', np.nanmean(nan_A, axis=0))\n print('numpy var:', np.nanvar(nan_A, axis=0))\n print('numpy std:', np.nanstd(nan_A, axis=0))\n SS = nan_A - np.nanmean(nan_A, axis=0, keepdims=True)\n print('remove_mean', SS)\n SS = SS/np.nanstd(nan_A, axis=0, keepdims=True)\n print('divide std', SS)\n print('numpy new mean: ', np.nanmean(SS, axis=0))\n print('numpy new std: ', np.nanstd(SS, axis=0))\n"
},
{
"alpha_fraction": 0.6130030751228333,
"alphanum_fraction": 0.6274510025978088,
"avg_line_length": 25.742856979370117,
"blob_id": "c0aac3dba13e93084beac832500cb7661bf301da",
"content_id": "919ad98720bb424b0c11fef3ead361ee70856481",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 969,
"license_type": "no_license",
"max_line_length": 103,
"num_lines": 35,
"path": "/PySynapse/util/Interface.py",
"repo_name": "sapphire008/Python",
"src_encoding": "UTF-8",
"text": "# -*- coding: utf-8 -*-\r\n\"\"\"\r\nCreated on Sat Feb 4 14:55:04 2017\r\n\r\nXMessage Interface for windows.\r\nAllows to load variable from the current MATLAB session.\r\n\r\n@author: Edward\r\n\"\"\"\r\n\r\nimport os\r\nimport ctypes\r\nimport matlab.engine\r\n\r\nclass MATLABListener(object):\r\n def __init__(self):\r\n # ml = matlab.engine.start_matlab()\r\n future = matlab.engine.connect_matlab(async=True)\r\n eng = future.result()\r\n # eng.sqrt(4.0) # calls MATLAB's function\r\n\r\nclass XDMessage(object):\r\n lib_path = 'D:/Edward/Documents/Assignments/Scripts/Python/PySynapse/resources/lib/XDMessaging.dll'\r\n def __init__(self, lib_path=None):\r\n self.lib_path = lib_path\r\n if lib_path is not None:\r\n self.lib = ctypes.cdll.LoadLibrary(lib_path)\r\n \r\n def makeListener(self):\r\n listener = self.lib.XDListener()\r\n return\r\n \r\n def makeBroadcaster(self):\r\n client = self.lib.XDMessagingClient()\r\n return"
},
{
"alpha_fraction": 0.5289793014526367,
"alphanum_fraction": 0.5319201350212097,
"avg_line_length": 45.34659194946289,
"blob_id": "14672ec2b6997bdff865133e3782b32d9ee55f55",
"content_id": "fa4c92b7f1943f8d9f5fa0e95462afd82824cdf6",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 8161,
"license_type": "no_license",
"max_line_length": 89,
"num_lines": 176,
"path": "/Spikes/spikedetekt2/spikedetekt2/processing/graph.py",
"repo_name": "sapphire008/Python",
"src_encoding": "UTF-8",
"text": "\"\"\"Graph routines.\"\"\"\n\n# -----------------------------------------------------------------------------\n# Imports\n# -----------------------------------------------------------------------------\nfrom itertools import izip\n\nimport numpy as np\n\n\n# -----------------------------------------------------------------------------\n# Component class\n# -----------------------------------------------------------------------------\nclass Component(object):\n def __init__(self, items=None, chunk=None, \n s_start=0, keep_start=0, keep_end=np.inf,\n recording=0):\n self.items = items\n self.chunk = chunk\n if not chunk:\n self.s_start = s_start\n self.keep_start = keep_start\n self.keep_end = keep_end\n self.recording = recording\n else:\n self.s_start = chunk.s_start\n self.keep_start = chunk.keep_start\n self.keep_end = chunk.keep_end\n self.recording = chunk.recording\n \n def __repr__(self):\n return '<Component {0:s}>'.format(self.items)\n\n \n# -----------------------------------------------------------------------------\n# Graph\n# -----------------------------------------------------------------------------\ndef _to_tuples(x):\n return ((i, j) for (i, j) in x)\n \ndef _to_list(x):\n return [(i, j) for (i, j) in x]\n\ndef connected_components(chunk_weak=None, chunk_strong=None, \n chunk=None, probe_adjacency_list=None, \n return_objects=True,\n **prm):\n \"\"\"\n Return a list of pairs (samp, chan) of the connected components in the 2D\n array chunk_weak, where a pair is adjacent if the samples are within join_size of\n each other, and the channels are adjacent in probe_adjacency_list, the channel graph.\n \n Arguments:\n \n * chunk_weak: Nsamples x Nchannels array with weak threshold crossings\n * chunk_strong: Nsamples x Nchannels array with strong threshold crossings\n * probe_adjacency_list: a dict {channel: [neighbors]}\n * join_size: the number of samples defining the tolerance in time for\n finding connected components\n \n \"\"\"\n \n join_size = prm.get('join_size', prm.get('connected_component_join_size', 0))\n \n if probe_adjacency_list is None:\n probe_adjacency_list = {}\n \n if chunk_strong is None:\n chunk_strong = chunk_weak\n \n assert chunk_weak.shape == chunk_strong.shape\n \n # set of connected component labels which contain at least one strong \n # node\n strong_nodes = set()\n \n n_s, n_ch = chunk_weak.shape\n join_size = int(join_size)\n \n # an array with the component label for each node in the chunk\n label_buffer = np.zeros((n_s, n_ch), dtype=np.int32)\n \n # component indices, a dictionary with keys the label of the component\n # and values a list of pairs (sample, channel) belonging to that component \n comp_inds = {}\n # mgraph is the channel graph, but with edge node connected to itself\n # because we want to include ourself in the adjacency. Each key of the\n # channel graph (a dictionary) is a node, and the value is a set of nodes\n # which are connected to it by an edge\n mgraph = {}\n for source, targets in probe_adjacency_list.iteritems():\n # we add self connections\n mgraph[source] = targets.union([source])\n # label of the next component\n c_label = 1\n # for all pairs sample, channel which are nonzero (note that numpy .nonzero\n # returns (all_i_s, all_i_ch), a pair of lists whose values at the\n # corresponding place are the sample, channel pair which is nonzero. The\n # lists are also returned in sorted order, so that i_s is always increasing\n # and i_ch is always increasing for a given value of i_s. izip is an\n # iterator version of the Python zip function, i.e. does the same as zip\n # but quicker. zip(A,B) is a list of all pairs (a,b) with a in A and b in B\n # in order (i.e. (A[0], B[0]), (A[1], B[1]), .... In conclusion, the next\n # line loops through all the samples i_s, and for each sample it loops\n # through all the channels.\n for i_s, i_ch in izip(*chunk_weak.nonzero()):\n # the next two lines iterate through all the neighbours of i_s, i_ch\n # in the graph defined by graph in the case of edges, and\n # j_s from i_s-join_size to i_s.\n for j_s in xrange(i_s-join_size, i_s+1):\n # allow us to leave out a channel from the graph to exclude bad\n # channels\n if i_ch not in mgraph:\n continue\n for j_ch in mgraph[i_ch]:\n # label of the adjacent element\n adjlabel = label_buffer[j_s, j_ch]\n # if the adjacent element is nonzero we need to do something\n if adjlabel:\n curlabel = label_buffer[i_s, i_ch]\n if curlabel==0:\n # if current element is still zero, we just assign\n # the label of the adjacent element to the current one\n label_buffer[i_s, i_ch] = adjlabel\n # and add it to the list for the labelled component\n comp_inds[adjlabel].append((i_s, i_ch))\n\n elif curlabel!=adjlabel:\n # if the current element is unequal to the adjacent\n # one, we merge them by reassigning the elements of the\n # adjacent component to the current one\n # samps_chans is an array of pairs sample, channel\n # currently assigned to component adjlabel\n samps_chans = np.array(comp_inds[adjlabel], dtype=np.int32)\n # samps_chans[:, 0] is the sample indices, so this\n # gives only the samp,chan pairs that are within\n # join_size of the current point\n # TODO: is this the right behaviour? If a component can\n # have a width bigger than join_size I think it isn't!\n samps_chans = samps_chans[i_s-samps_chans[:, 0]<=join_size]\n # relabel the adjacent samp,chan points with current\n # label\n samps, chans = samps_chans[:, 0], samps_chans[:, 1]\n label_buffer[samps, chans] = curlabel\n # add them to the current label list, and remove the\n # adjacent component entirely\n comp_inds[curlabel].extend(comp_inds.pop(adjlabel))\n # did not deal with merge condition, now fixed it seems...\n # WARNING: might this \"in\" incur a performance hit here...?\n if adjlabel in strong_nodes:\n strong_nodes.add(curlabel)\n strong_nodes.remove(adjlabel)\n \n # NEW: add the current component label to the set of all\n # strong nodes, if the current node is strong\n if curlabel > 0 and chunk_strong[i_s, i_ch]:\n strong_nodes.add(curlabel) \n \n if label_buffer[i_s, i_ch]==0:\n # if nothing is adjacent, we have the beginnings of a new component,\n # so we label it, create a new list for the new component which is\n # given label c_label, then increase c_label for the next new\n # component afterwards\n label_buffer[i_s, i_ch] = c_label\n comp_inds[c_label] = [(i_s, i_ch)]\n if chunk_strong[i_s, i_ch]:\n strong_nodes.add(c_label)\n c_label += 1\n \n # only return the values, because we don't actually need the labels\n comps = [comp_inds[key] for key in comp_inds.keys() if key in strong_nodes]\n if return_objects:\n return [Component(comp, chunk=chunk)\n for comp in comps]\n else:\n return comps\n "
},
{
"alpha_fraction": 0.5249900221824646,
"alphanum_fraction": 0.5559775829315186,
"avg_line_length": 42.842105865478516,
"blob_id": "5f60d3e8c501324815d60703b4c40ad33079c3f5",
"content_id": "ed9b2e2a41b822855d5100a3e8753dd8c43c65eb",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 5002,
"license_type": "no_license",
"max_line_length": 151,
"num_lines": 114,
"path": "/ReadNWrite/edf.py",
"repo_name": "sapphire008/Python",
"src_encoding": "UTF-8",
"text": "#!/usr/bin/env python3\n# -*- coding: utf-8 -*-\n\"\"\"\nCreated on Wed Dec 5 19:31:29 2018\n\n@author: edward\n\"\"\"\n\nimport os\nimport numpy as np\n\n\ndef readString(fid, strlength):\n tmp = np.fromfile(fid, '|S1', strlength)\n return np.ndarray.tostring(tmp).decode('UTF-8')\n\n\nclass Header(object):\n pass\n\nclass EDF(object):\n \"\"\"Load a EDF file\"\"\"\n def __init__(self, filepath=None, header_only=False):\n self.filepath = filepath\n self.header = Header()\n \n if filepath is not None and isinstance(filepath, str) and os.path.isfile(filepath):\n self.loadData(filepath, header_only)\n else:\n raise(Exception('Invalid EDF file'))\n \n def loadData(self, filepath, header_only=False):\n with open(filepath, 'rb') as fid:\n self.header.version = readString(fid, 8)\n self.header.patient_id = readString(fid, 8)\n fid.seek(17)\n self.header.gender = readString(fid, 1)\n fid.seek(19)\n self.header.DOB = readString(fid, 11)\n fid.seek(44)\n self.header.age = int(readString(fid, 2))\n fid.seek(98)\n self.header.start_date_entered = readString(fid, 11)\n fid.seek(110)\n self.header.eeg_id = readString(fid, 13).strip()\n fid.seek(124)\n self.header.tech = readString(fid, 2)\n fid.seek(127)\n self.header.machine = readString(fid, 20).strip()\n fid.seek(167)\n self.header.start_date = readString(fid, 8)\n self.header.start_time = readString(fid, 8)\n self.header.header_size = int(readString(fid, 8).strip())\n self.header.file_type = readString(fid, 5).strip()\n fid.seek(235)\n self.header.num_records = int(readString(fid, 8).strip())\n self.header.duration = float(readString(fid, 8).strip())\n self.header.num_signals = int(readString(fid, 4).strip())\n self.header.labels = [[]]*self.header.num_signals\n for n in range(self.header.num_signals):\n self.header.labels[n] = readString(fid, 16).strip()\n \n self.header.trans_type = [[]]*self.header.num_signals\n for n in range(self.header.num_signals):\n self.header.trans_type[n] = readString(fid, 80).strip()\n \n self.header.phys_dim = [[]]*self.header.num_signals\n for n in range(self.header.num_signals):\n self.header.phys_dim[n] = readString(fid, 8).strip()\n \n fid.seek(fid.tell()+1)\n self.header.phys_min = [[]]*self.header.num_signals\n for n in range(self.header.num_signals):\n self.header.phys_min[n] = float(readString(fid, 8).strip())\n \n self.header.phys_max = [[]]*self.header.num_signals\n for n in range(self.header.num_signals):\n self.header.phys_max[n] = float(readString(fid, 8).strip())\n \n self.header.dig_min = [[]]*self.header.num_signals\n for n in range(self.header.num_signals):\n self.header.dig_min[n] = int(readString(fid, 8).strip())\n \n self.header.dig_max = [[]]*self.header.num_signals\n for n in range(self.header.num_signals):\n self.header.dig_max[n] = int(readString(fid, 8).strip())\n \n self.header.prefilt = [[]]*self.header.num_signals\n for n in range(self.header.num_signals):\n self.header.prefilt[n] = readString(fid, 80).strip()\n \n self.header.sample_rate = [[]]*self.header.num_signals\n for n in range(self.header.num_signals):\n self.header.sample_rate[n] = int(readString(fid, 8).strip())\n \n if header_only:\n self.data = []\n else:\n fid.seek(fid.tell()+32 * self.header.num_signals) \n # Loading all the data for later organization\n Ch_data = np.fromfile(fid, np.int16).reshape(-1, self.header.num_records, order='F')\n Rs = np.cumsum(np.insert(int(self.header.duration)*np.array(self.header.sample_rate), 0, 0))\n self.data = [[]] * self.header.num_signals\n sf = (np.array(self.header.phys_max)- np.array(self.header.phys_min)) / (np.array(self.header.dig_max) - np.array(self.header.dig_min))\n dc = np.array(self.header.phys_max) - sf * np.array(self.header.dig_max)\n for n in range(self.header.num_signals):\n self.data[n] = Ch_data[Rs[n]:(Rs[n+1])].flatten(order='F') * sf[n] + dc[n]\n \n\n\nif __name__ == '__main__':\n filepath = '/Volumes/Storage/TUH_EEG/train/01_tcp_ar/009/00000906/s007_2003_04_28/00000906_s007_t000.edf'\n filepath = '/Volumes/Storage/TUH_EEG/train/01_tcp_ar/002/00000254/s005_2010_11_15/00000254_s005_t000.edf'\n edf = EDF(filepath)\n "
},
{
"alpha_fraction": 0.5243991017341614,
"alphanum_fraction": 0.5379137396812439,
"avg_line_length": 41.819149017333984,
"blob_id": "4c4ab2442cd25975a215abc34849732905bb79bd",
"content_id": "efac6e0a90e0e2a18f6fd096dd540df032add617",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 12357,
"license_type": "no_license",
"max_line_length": 137,
"num_lines": 282,
"path": "/Plots/archive/ImportData_old.py",
"repo_name": "sapphire008/Python",
"src_encoding": "UTF-8",
"text": "# -*- coding: utf-8 -*-\r\n\"\"\"\r\nCreated on Sat Jun 27 19:24:54 2015\r\n\r\n@author: Edward\r\n\"\"\"\r\n\r\nimport numpy as np\r\nimport pandas as pd\r\nimport os\r\nimport zipfile\r\n\r\ndataFile = 'C:/Users/Edward/Documents/Assignments/Scripts/Python/Plots/example/Cell B.10Feb15.S1.E28.dat'\r\n\r\nclass Protocol(object): # for composition\r\n pass\r\n\r\nclass NeuroData(object):\r\n \"\"\"Read electrophysiology data file\r\n \"\"\"\r\n def __init__(self, dataFile=None, old=False):\r\n \"\"\"Initialize class\"\"\"\r\n self.Voltage = {}\r\n self.Current = {}\r\n self.Stimulus = {}\r\n self.Protocol = Protocol() # composition\r\n if dataFile is not None and isinstance(dataFile, str):\r\n # load directly if all the conditions are met\r\n self.LoadData(dataFile=dataFile, old=old)\r\n else:\r\n IOError('Unrecognized data file input')\r\n\r\n def LoadData(self, dataFile, old=False):\r\n \"\"\"Load data in text file\"\"\"\r\n # check file exists\r\n if not os.path.isfile(dataFile):\r\n IOError('%s does not exist' %dataFile)\r\n # Evoke proper load method\r\n if old:\r\n self.LoadOldDataFile(dataFile)\r\n else:\r\n self.LoadDataFile(dataFile)\r\n\r\n def LoadDataFile(self, dataFile):\r\n \"\"\"Read zipped data file (new format)\"\"\"\r\n archive = zipfile.ZipFile(dataFile, 'r')\r\n # Check if the file is a valid zipfile\r\n if not archive.is_zipfile():\r\n IOError('%s is not a valid zip file'%dataFile)\r\n # read header txt file\r\n fid = archive.read('header.txt','r')\r\n self.Protocol.infoBytes = np.fromfile(fid, np.int32, 1) # size of header\r\n # ... etc\r\n\r\n def LoadOldDataFile(self, dataFile, numChannels=4, infoOnly=False):\r\n \"\"\"Read Old .dat format data file\"\"\"\r\n\r\n self.Protocol.numChannels = numChannels # hard set\r\n self.Protocol.readDataFrom = os.path.abspath(dataFile).replace('\\\\','/') # store read location\r\n with open(dataFile, 'rb') as fid:\r\n fid.seek(6, 0) # set to position 6 from the beginning of the file\r\n self.Protocol.infoBytes = np.fromfile(fid, np.int32, 1) # size of header\r\n self.Protocol.sweepWindow = np.fromfile(fid, np.float32, 1)[0] #in msec per episode\r\n self.Protocol.msPerPoint = np.fromfile(fid, np.float32, 1)[0] / 1000.0 # in microseconds per channel, divided by 1000 to msec\r\n self.Protocol.numPoints = np.fromfile(fid, np.float32, 1)[0] # number of data points\r\n self.Protocol.WCtime = np.fromfile(fid, np.float32, 1)[0] # in seconds since went whole cell\r\n self.Protocol.drugTime = np.fromfile(fid, np.float32,1)[0] # in seconds since most recent drug started\r\n self.Protocol.drug = np.fromfile(fid,np.float32,1)[0] #an integer indicating what drug is on\r\n\r\n #% new from BWS on 12/21/08\r\n np.fromfile(fid,np.int32,1) # simulated data\r\n fid.seek(48 , 0)\r\n self.Protocol.genData = np.fromfile(fid, np.float32, 56) # [need expansion]\r\n\r\n # read in TTL information\r\n self.Protocol.ttlData = []\r\n for index in xrange(self.Protocol.numChannels):\r\n fid.seek(10, 1) # 10 is for VB user-defined type stuff\r\n self.Protocol.ttlData.append(np.fromfile(fid, np.float32, 17)) #[need expansion]\r\n #print(fid.tell())\r\n\r\n # read in DAC information\r\n self.Protocol.dacData = []\r\n self.Protocol.dacName = []\r\n for index in xrange(self.Protocol.numChannels):\r\n fid.seek(10, 1) # 10 is for VB user-defined type stuff\r\n self.Protocol.dacData.append(np.fromfile(fid, np.float32, 42)) #[need exspansion]\r\n self.Protocol.dacName.append(self.readVBString(fid))\r\n\r\n #print(fid.tell())\r\n # Get other parameters\r\n self.Protocol.classVersionNum = np.fromfile(fid, np.float32, 1)[0]\r\n self.Protocol.acquireComment=self.readVBString(fid)\r\n self.Protocol.acquireAnalysisComment=self.readVBString(fid)\r\n self.Protocol.drugName=self.readVBString(fid)\r\n self.Protocol.exptDesc=self.readVBString(fid)\r\n self.Protocol.computerName=self.readVBString(fid)\r\n self.Protocol.savedFileName=os.path.abspath(self.readVBString(fid)).replace('\\\\','/')\r\n self.Protocol.fileName = self.Protocol.savedFileName\r\n self.Protocol.linkedFileName=os.path.abspath(self.readVBString(fid)).replace('\\\\','/')\r\n self.Protocol.acquisitionDeviceName=self.readVBString(fid)\r\n self.Protocol.traceKeys=self.readVBString(fid)\r\n self.Protocol.traceInitValuesStr=self.readVBString(fid)\r\n self.Protocol.extraScalarKeys=self.readVBString(fid)\r\n self.Protocol.extraVectorKeys=self.readVBString(fid)\r\n self.Protocol.genString=self.readVBString(fid)\r\n self.Protocol.TTLstring = []\r\n for index in xrange(self.Protocol.numChannels):\r\n self.Protocol.TTLstring.append(self.readVBString(fid))\r\n self.Protocol.ampDesc = []\r\n for index in xrange(self.Protocol.numChannels):\r\n self.Protocol.ampDesc.append(self.readVBString(fid))\r\n\r\n # Get Channel info\r\n channelDict = {'VoltADC1':'VoltA','VoltADC3':'VoltB',\r\n 'VoltADC5':'VoltC','VoltADC7':'VoltD',\r\n 'CurADC0':'CurA','CurADC2':'CurB',\r\n 'CurADC4':'CurC','CurADC6':'CurD',\r\n 'StimulusAmpA':'StimulusA',\r\n 'StimulusAmpB':'StimulusB',\r\n 'StimulusAmpC':'StimulusC',\r\n 'StimulusAmpD':'StimulusD',\r\n 'StimulusAmpA9':'StimulusA'}\r\n keys = [k.split(\"/\")[0] for k in self.Protocol.traceKeys.split()]\r\n self.Protocol.channelNames = [channelDict[k] for k in keys]\r\n self.Protocol.numTraces = len(self.Protocol.channelNames)\r\n\r\n if infoOnly: # stop here if only\r\n return\r\n\r\n # Read trace data\r\n self.Protocol.traceDesc = []\r\n for chan in self.Protocol.channelNames:\r\n traceFactor = float(np.fromfile(fid, np.float32, 1))\r\n traceLength = int(np.fromfile(fid, np.int32, 1))\r\n traceDesc = self.readVBString(fid)\r\n self.Protocol.traceDesc.append(traceDesc)\r\n traceData = np.fromfile(fid, np.int16, traceLength)\r\n traceData = traceFactor * traceData\r\n if chan[0] == 'V':\r\n self.Voltage[chan[-1]] = traceData\r\n elif chan[0] == 'C':\r\n self.Current[chan[-1]] = traceData\r\n elif chan[0] == 'S':\r\n self.Stimulus[chan[-1]] = traceData\r\n else: # fallthrough\r\n TypeError('Unrecognized channel type')\r\n\r\n # close file\r\n fid.close()\r\n\r\n @staticmethod\r\n def readVBString(fid):\r\n stringLength = int(np.fromfile(fid, np.int16, 1))\r\n if stringLength==0:\r\n return('')\r\n else:\r\n return(''.join(np.fromfile(fid, '|S1', stringLength)))\r\n\r\n\r\n\r\nclass ImageData(object):\r\n \"\"\"Read image data file\r\n \"\"\"\r\n def __init__(self, dataFile=None):\r\n \"\"\"Initialize class\"\"\"\r\n self.Img = {}\r\n self.Protocol = {}\r\n if dataFile is not None and isinstance(dataFile, str):\r\n self.LoadImageData(dataFile)\r\n\r\nclass FigureData(object):\r\n \"\"\"Data for plotting\r\n \"\"\"\r\n def __init__(self, dataFile=None):\r\n \"\"\"Initialize class\"\"\"\r\n self.series = {'x':[],'y':[],'z':[]}\r\n self.stats = {'x':{},'y':{},'z':{}}\r\n self.names = {'x':[],'y':[],'z':[]}\r\n self.num = {'x':[],'y':[],'z':[]} # count number of data sets\r\n if dataFile is not None and isinstance(dataFile, str):\r\n self.LoadFigureData(dataFile)\r\n\r\n def LoadFigureData(self, dataFile, delimiter=','):\r\n \"\"\"Read text data for figure plotting\"\"\"\r\n # check file exists\r\n if not os.path.isfile(dataFile):\r\n IOError('%s does not exist' %dataFile)\r\n\r\n with open(dataFile, 'rb') as fid:\r\n for line in fid: # iterate each line\r\n if not line.strip() or line[0] == \"#\":\r\n continue # skip comments and empty lines\r\n # split comma delimited string\r\n # series code, series name,@datatype, data1, data2, data3, ...\r\n lst = [s.strip() for s in line.split(delimiter)]\r\n # Parse variable\r\n v = lst[0][0] # variable name\r\n stats = lst[0][1:-1]\r\n # Read the data\r\n seriesData = self.ParseFigureData(lst[1][1:], lst[3:])\r\n # Organize the data to structure\r\n if stats != \"\": #stats, not empty\r\n if stats in self.stats[v].keys(): # key exists already\r\n self.stats[v][stats].append(seriesData)\r\n else: # add new key / create new list\r\n self.stats[v][stats] = [seriesData]\r\n else: # series data\r\n self.series[v].append(seriesData)\r\n self.names[v].append(lst[2][1:-1])\r\n\r\n fid.close()\r\n # Parse number of data set\r\n for v in self.series.keys():\r\n self.num[v] = len(self.series[v])\r\n\r\n @staticmethod\r\n def ParseFigureData(valueType, seriesList):\r\n \"\"\"Parse each line of read text\"\"\"\r\n if valueType == 'str':\r\n return(np.array(seriesList))\r\n elif valueType == 'float':\r\n return(np.array(seriesList).astype(np.float))\r\n elif valueType == 'int':\r\n return(np.array(seriesList).astype(np.int))\r\n else: # unrecognized type\r\n TypeError('Unrecognized data type')\r\n\r\n def Neuro2Figure(self, data, channels=None, streams=None):\r\n \"\"\"Use NeuroData method to load and parse trace data to be plotted\r\n data: an instance of NeuroData, ro a list of instances\r\n channels: list of channels to plot, e.g. ['A','C','D']\r\n streams: list of data streams, e.g. ['V','C','S']\r\n \"\"\"\r\n # Check instance\r\n if isinstance(data, NeuroData):\r\n data = [data] # convert to list\r\n\r\n # initialize notes, stored in stats attribute\r\n self.stats['y']['notes'] = []\r\n notes = \"%s %.1f mV %d pA channel %s WCTime %s min\"\r\n for n, d in enumerate(data): # iterate over all data\r\n # Time data\r\n self.series['x'].append(np.arange(0, d.Protocol.sweepWindow+\r\n d.Protocol.msPerPoint, d.Protocol.msPerPoint))\r\n self.names['x'].append('ms') # label unit\r\n # iterate over all the channels\r\n for c in channels:\r\n # iterate over data streams\r\n for s in streams:\r\n tmp = {'V': d.Voltage, 'C':d.Current, 'S': d.Stimulus}.get(s)\r\n if tmp is None or not bool(tmp):\r\n continue\r\n tmp = tmp[c]\r\n if tmp is None:\r\n continue\r\n self.series['y'].append(tmp)\r\n self.names['y'].append((s, c))\r\n if s == 'V':\r\n volt_i = tmp[0]\r\n if s == 'C':\r\n cur_i = tmp[0]\r\n dtime = self.sec2hhmmss(d.Protocol.WCtime)\r\n # Notes: {path} Initial: {voltage 0.1f mV} {current %d pA} \\\r\n # WC Time: 1:12:30.0 min\r\n notesstr = notes %(d.Protocol.readDataFrom, volt_i, cur_i, c,dtime)\r\n self.stats['y']['notes'].append(notesstr)\r\n\r\n @staticmethod\r\n def sec2hhmmss(sec):\r\n m, s = divmod(sec, 60)\r\n h, m = divmod(m, 60)\r\n return(\"%d:%d:%0.1f\" % (h, m, s))\r\n\r\n\r\n\r\n\r\n\r\nif __name__ == '__main__':\r\n data = NeuroData(dataFile, old=True)\r\n figdata = FigureData()\r\n figdata.Neuro2Figure(data, channels=['A','C','D'], streams=['V','C','S'])\r\n"
},
{
"alpha_fraction": 0.5481625199317932,
"alphanum_fraction": 0.5653623938560486,
"avg_line_length": 49.45970153808594,
"blob_id": "cfdca518c662fcf297508342c59f2533cdb50b37",
"content_id": "70b0fed96257937f5c554271af66afbbaaae5825",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 68954,
"license_type": "no_license",
"max_line_length": 175,
"num_lines": 1340,
"path": "/Pycftool/Pycftool.py",
"repo_name": "sapphire008/Python",
"src_encoding": "UTF-8",
"text": "# -*- coding: utf-8 -*-\r\n\r\n# Form implementation generated from reading ui file 'D:\\Edward\\Documents\\Assignments\\Scripts\\Python\\Pycftool\\resources\\ui_designer\\Pycftool_3.ui'\r\n#\r\n# Created by: PyQt5 UI code generator 5.10.1\r\n#\r\n# WARNING! All changes made in this file will be lost!\r\n\r\n\r\nimport os\r\nimport sys\r\nsys.path.append(os.path.abspath(os.path.join(os.path.dirname(__file__), \"../generic/\")))\r\n\r\nimport numpy as np\r\nfrom scipy.optimize import curve_fit\r\nfrom pdb import set_trace\r\n\r\nfrom PyQt5 import QtCore, QtGui, QtWidgets\r\nfrom QCodeEdit import QCodeEdit\r\nfrom ElideQLabel import ElideQLabel\r\nfrom MATLAB import *\r\nfrom FitOptions import *\r\n\r\n\r\n__location__ = os.path.realpath(os.path.join(os.getcwd(), os.path.dirname(__file__)))\r\n__version__ = \"Pycftool 0.1\"\r\n\r\ndef my_excepthook(type, value, tback):\r\n \"\"\"This helps prevent program crashing upon an uncaught exception\"\"\"\r\n sys.__excepthook__(type, value, tback)\r\n\r\n\r\nimport sip\r\nsip.setapi('QVariant', 2)\r\n\r\n# Routines for Qt import errors\r\nfrom PyQt5 import QtGui, QtCore, QtWidgets\r\nimport pyqtgraph as pg\r\nfrom pyqtgraph import GraphicsLayoutWidget\r\nimport pyqtgraph.opengl as gl\r\n#view = gl.GLViewWidget()\r\n#from pyqtgraph.Qt import QtGui, QtCore\r\n\r\ntry:\r\n from PyQt5.QtCore import QString\r\nexcept ImportError:\r\n QString = str\r\n\r\ntry:\r\n _fromUtf8 = QtCore.QString.fromUtf8\r\nexcept AttributeError:\r\n def _fromUtf8(s):\r\n return s\r\n\r\n# try:\r\n# _encoding = QtGui.QApplication.UnicodeUTF8\r\n# def _translate(context, text, disambig):\r\n# return QtCore.QCoreApplication.translate(context, text, disambig, _encoding)\r\n# except AttributeError:\r\n# def _translate(context, text, disambig):\r\n# return QtCore.QCoreApplication.translate(context, text, disambig)\r\n\r\n_translate = QtCore.QCoreApplication.translate\r\n\r\nclass cftool_MainWindow(QtWidgets.QMainWindow):\r\n def __init__(self, parent=None, vars=[locals(), globals()]):\r\n super(cftool_MainWindow, self).__init__(parent)\r\n self.vars = self.filterVars(vars)\r\n self.xdata = np.nan\r\n self.ydata = np.nan\r\n self.zdata = np.nan\r\n self.wdata = np.nan\r\n self.availableData = {}\r\n self.currentDataType = 'None'\r\n self.varnames = {}\r\n self.autofit = 2\r\n self.centerscale = 0\r\n self.params = {}\r\n self.methods = {}\r\n # Display\r\n self.graphicsView = None\r\n # Set up fit options\r\n self.options = FitOptions(friend=self, method='2D: curve_fit')\r\n # Expose data combobox for manipulation later\r\n self.databox = {}\r\n self.DEBUG = False\r\n # Set up GUI window\r\n self.setupUi(self)\r\n\r\n def setupUi(self, MainWindow):\r\n \"\"\"This function is converted from the .ui file from the designer\"\"\"\r\n MainWindow.setObjectName(\"MainWindow\")\r\n MainWindow.resize(800, 600)\r\n self.centralwidget = QtWidgets.QWidget(MainWindow)\r\n self.centralwidget.setObjectName(\"centralwidget\")\r\n MainWindow.setCentralWidget(self.centralwidget)\r\n\r\n # Grid layout to configure the tab and dock widget\r\n self.gridLayoutMain = QtWidgets.QGridLayout(self.centralwidget)\r\n self.gridLayoutMain.setObjectName(\"gridLayoutMain\")\r\n\r\n self.gridLayoutTab = QtWidgets.QGridLayout()\r\n self.gridLayoutTab.setObjectName(\"gridLayoutTab\")\r\n\r\n # Initialize tab\r\n self.tabWidget = QtWidgets.QTabWidget(self.centralwidget)\r\n self.tabWidget.setTabsClosable(True)\r\n self.tabWidget.setMovable(True)\r\n self.tabWidget.tabCloseRequested.connect(self.closeTab)\r\n self.tabWidget.setObjectName(\"tabWidget\")\r\n\r\n # First tab\r\n self.tab = QtWidgets.QWidget() # current tab\r\n self.tab.setObjectName(\"tab\")\r\n self.tabWidget.addTab(self.tab, \"\")\r\n self.tabWidget.setTabText(self.tabWidget.indexOf(self.tab), _translate(\"MainWindow\", \"Untitled Fit 1\"))\r\n self.tabWidget.tabIndex = 1\r\n # Initialize the content of the tab\r\n self.initializeTabContents(self.tab)\r\n\r\n # Tab that will add another tab\r\n self.tabButton = QtWidgets.QToolButton()\r\n self.tabButton.setObjectName(\"tabButton\")\r\n self.tabButton.setText(\"+\")\r\n font = self.tabButton.font()\r\n font.setBold(True)\r\n self.tabButton.setFont(font)\r\n self.tabWidget.setCornerWidget(self.tabButton)\r\n self.tabButton.clicked.connect(lambda: self.newFit())\r\n\r\n # Adding the tab widget to the layout\r\n self.gridLayoutTab.addWidget(self.tabWidget, 0, 0, 1, 1)\r\n # Add the tab layout to the main layout\r\n self.gridLayoutMain.addLayout(self.gridLayoutTab, 0, 0, 1, 1)\r\n\r\n\r\n # <editor-fold desc=\"Configure dockWidget Fit Table\">\r\n # Table of fits dockWidget\r\n self.fitTable_dockWidget = QtWidgets.QDockWidget(MainWindow)\r\n self.fitTable_dockWidget.setObjectName(\"fitTable_dockWidget\")\r\n self.fitTable_dockWidgetContents = QtWidgets.QWidget()\r\n self.fitTable_dockWidgetContents.setObjectName(\"fitTable_dockWidgetContents\")\r\n self.fitTable_dockWidget.hide()\r\n\r\n # Layout for table of fits\r\n self.gridLayoutFitsTable = QtWidgets.QGridLayout(self.fitTable_dockWidgetContents)\r\n self.gridLayoutFitsTable.setObjectName(\"gridLayoutFitsTable\")\r\n self.gridLayoutTable = QtWidgets.QGridLayout()\r\n self.gridLayoutTable.setSpacing(7)\r\n self.gridLayoutTable.setObjectName(\"gridLayoutTable\")\r\n self.tableWidget = QtWidgets.QTableWidget(self.fitTable_dockWidgetContents)\r\n self.tableWidget.setObjectName(\"tableWidget\")\r\n self.tableWidget.setColumnCount(0)\r\n self.tableWidget.setRowCount(0)\r\n self.gridLayoutTable.addWidget(self.tableWidget, 0, 0, 1, 1)\r\n self.gridLayoutFitsTable.addLayout(self.gridLayoutTable, 0, 0, 1, 1)\r\n\r\n # DockWidget table of fits\r\n self.fitTable_dockWidget.setWidget(self.fitTable_dockWidgetContents)\r\n MainWindow.addDockWidget(QtCore.Qt.DockWidgetArea(8), self.fitTable_dockWidget)\r\n # </editor-fold>\r\n\r\n # Menu\r\n self.menubar = QtWidgets.QMenuBar(MainWindow)\r\n self.menubar.setGeometry(QtCore.QRect(0, 0, 800, 26))\r\n self.menubar.setObjectName(\"menubar\")\r\n self.setMenuBarItems() # Call function to set menubar\r\n MainWindow.setMenuBar(self.menubar)\r\n\r\n # Set up status bar\r\n self.statusbar = QtWidgets.QStatusBar(MainWindow)\r\n self.statusbar.setObjectName(\"statusbar\")\r\n MainWindow.setStatusBar(self.statusbar)\r\n\r\n # Run\r\n self.retranslateUi(MainWindow)\r\n self.tabWidget.setCurrentIndex(1)\r\n QtCore.QMetaObject.connectSlotsByName(MainWindow)\r\n\r\n def newFit(self, title=None):\r\n \"\"\"Insert a new tab\"\"\"\r\n tab = QtWidgets.QWidget() # Create a new tab\r\n self.tabWidget.addTab(tab, \"\") # Insert the new tab right before the tab_plus\r\n if title is None:\r\n self.tabWidget.tabIndex = self.tabWidget.tabIndex + 1\r\n index = self.tabWidget.tabIndex\r\n title = \"Untitled Fit \" + str(index)\r\n\r\n tab_index =self.tabWidget.indexOf(tab)\r\n self.tabWidget.setTabText(tab_index, _translate(\"MainWindow\", title))\r\n self.tabWidget.setCurrentIndex(tab_index) # switch to that tab\r\n self.initializeTabContents(tab) # initialize the contents\r\n\r\n def initializeTabContents(self, tab):\r\n \"\"\"Tab content\"\"\"\r\n gridLayoutFits = QtWidgets.QGridLayout(tab)\r\n\r\n # Method\r\n method_groupBox = QtWidgets.QGroupBox(tab)\r\n method_groupBox.setTitle(\"\")\r\n method_groupBox.setObjectName(\"method_groupBox\")\r\n self.method_groupBox = method_groupBox\r\n self.initialize_method(method_groupBox)\r\n gridLayoutFits.addWidget(method_groupBox, 0, 4, 1, 8)\r\n\r\n # Display\r\n display_groupBox = QtWidgets.QGroupBox(tab)\r\n display_groupBox.setTitle(\"\")\r\n display_groupBox.setObjectName(\"displayGroupBox\")\r\n display_groupBox.setMinimumSize(500, 280)\r\n self.initialize_display(display_groupBox)\r\n self.display_groupBox = display_groupBox\r\n gridLayoutFits.addWidget(display_groupBox, 1, 3, 1, 11)\r\n\r\n # AutoFit\r\n autofit_groupBox = QtWidgets.QGroupBox(tab)\r\n autofit_groupBox.setTitle(\"\")\r\n self.initialize_autofit(autofit_groupBox)\r\n gridLayoutFits.addWidget(autofit_groupBox, 0, 12, 1, 2)\r\n\r\n # Results\r\n results_groupBox = QtWidgets.QGroupBox(tab)\r\n results_groupBox.setTitle(_translate(\"MainWindow\", \"Results\"))\r\n self.initialize_results(results_groupBox)\r\n results_groupBox.setMinimumWidth(300)\r\n gridLayoutFits.addWidget(results_groupBox, 1, 0, 1, 3)\r\n\r\n # Data: Initialize data after everything, in case of a call of the class with an input\r\n data_groupBox = QtWidgets.QGroupBox(tab)\r\n data_groupBox.setTitle(\"\")\r\n # data_groupBox.setObjectName(\"dataGroupBox\")\r\n self.initialize_data(data_groupBox)\r\n gridLayoutFits.addWidget(data_groupBox, 0, 0, 1, 4)\r\n\r\n return gridLayoutFits\r\n\r\n def initialize_data(self, gbox):\r\n \"\"\"Initialize the data groupbox in the tab\"\"\"\r\n gbox.setLayout(QtWidgets.QGridLayout())\r\n fitname_label = QtWidgets.QLabel(\"Fit Name:\")\r\n fitname_text = QtWidgets.QLineEdit()\r\n fitName = self.tabWidget.tabText(self.tabWidget.indexOf(self.tabWidget.currentWidget()))\r\n fitname_text.setText(fitName)\r\n fitname_text.editingFinished.connect(lambda: self.changeTabTitle(fitname_text.text()))\r\n\r\n comboList = ['(none)']+list(self.vars.keys())\r\n x_label = QtWidgets.QLabel(\"X data:\")\r\n x_comboBox = QtWidgets.QComboBox()\r\n x_comboBox.addItems(comboList)\r\n x_comboBox.currentIndexChanged.connect(lambda: self.onDataChanged('xdata', x_comboBox.currentText()))\r\n x_comboBox.setCurrentIndex(1) # for development\r\n\r\n y_label = QtWidgets.QLabel(\"Y data:\")\r\n y_comboBox = QtWidgets.QComboBox()\r\n y_comboBox.addItems(comboList)\r\n y_comboBox.currentIndexChanged.connect(lambda: self.onDataChanged('ydata', y_comboBox.currentText()))\r\n y_comboBox.setCurrentIndex(2) # for development\r\n\r\n z_label = QtWidgets.QLabel(\"Z data:\")\r\n z_comboBox = QtWidgets.QComboBox()\r\n z_comboBox.addItems(comboList)\r\n z_comboBox.currentIndexChanged.connect(lambda: self.onDataChanged('zdata', z_comboBox.currentText()))\r\n #z_comboBox.setCurrentIndex(3)\r\n\r\n w_label = QtWidgets.QLabel(\"Weights:\")\r\n w_comboBox = QtWidgets.QComboBox()\r\n w_comboBox.addItems(comboList)\r\n w_comboBox.currentIndexChanged.connect(lambda: self.onDataChanged('wdata', w_comboBox.currentText()))\r\n #w_comboBox.setCurrentIndex(0)\r\n\r\n self.databox = {'xdata': x_comboBox, 'ydata': y_comboBox, 'zdata': z_comboBox, 'wdata': w_comboBox}\r\n gbox.layout().addWidget(fitname_label, 0, 0, 1, 1)\r\n gbox.layout().addWidget(fitname_text, 0, 1, 1, 1)\r\n gbox.layout().addWidget(x_label, 1, 0, 1, 1)\r\n gbox.layout().addWidget(x_comboBox, 1, 1, 1, 1)\r\n gbox.layout().addWidget(y_label, 2, 0, 1, 1)\r\n gbox.layout().addWidget(y_comboBox, 2, 1, 1, 1)\r\n gbox.layout().addWidget(z_label, 3, 0, 1, 1)\r\n gbox.layout().addWidget(z_comboBox, 3, 1, 1, 1)\r\n gbox.layout().addWidget(w_label, 4, 0, 1, 1)\r\n gbox.layout().addWidget(w_comboBox, 4, 1, 1, 1)\r\n\r\n def changeTabTitle(self, new_text):\r\n tab_index = self.tabWidget.indexOf(self.tabWidget.currentWidget())\r\n self.tabWidget.setTabText(tab_index, _translate(\"MainWindow\", new_text))\r\n\r\n def onDataChanged(self, key, valname):\r\n \"\"\"Change, then graph, then fit the data upon data combobox index changed\"\"\"\r\n # Set data\r\n if valname == '(none)':\r\n setattr(self, key, np.nan)\r\n del self.availableData[key]\r\n else:\r\n setattr(self, key, self.vars[valname])\r\n self.availableData[key] = len(self.vars[valname])\r\n self.varnames[key] = valname\r\n if len(self.vars[valname]) < 3:\r\n # warn for data less than 3 elements\r\n popup_messageBox = QtWidgets.QMessageBox()\r\n popup_messageBox.setWindowTitle('Warning')\r\n popup_messageBox.setText('Warning: Array length cannot be less than 3')\r\n popup_messageBox.exec_()\r\n\r\n data_list, data_len = self.availableData.keys(), self.availableData.values()\r\n and_join = lambda x: \", \".join(x)[::-1].replace(\" ,\", \" dna \", 1)[::-1].replace(\r\n \" and\", \", and\" if len(x) > 2 else \" and\")\r\n\r\n # Check to see if the data all has the same length\r\n if len(set(data_len)) != 1:\r\n samelength_warngBox = QtWidgets.QMessageBox()\r\n samelength_warngBox.setWindowTitle('Warning')\r\n samelength_warngBox.setText('Warning: {} array length are not the same'.format(and_join(data_list)))\r\n samelength_warngBox.setInformativeText(str(self.availableData))\r\n samelength_warngBox.exec_()\r\n\r\n # Change the method list comboBox\r\n if set(data_list) == set(['xdata']) or \\\r\n set(data_list) == set(['xdata', 'ydata']) or \\\r\n set(data_list) == set(['xdata', 'ydata', 'wdata']):\r\n newDataType = '2D'\r\n else:\r\n newDataType = '3D'\r\n\r\n if self.currentDataType != newDataType:\r\n self.currentDataType = newDataType\r\n method_comboBox = self.method_groupBox.layout().itemAt(0).widget()\r\n if newDataType == '3D':\r\n self.setMethodLists(method_comboBox, methodList=1)\r\n else: # 2D\r\n self.setMethodLists(method_comboBox, methodList=2)\r\n\r\n # Issue scatter plots\r\n self.graphData(data_list)\r\n\r\n # Do the fitting\r\n self.curveFit()\r\n\r\n def initialize_method(self, gbox, method_list='Method 2', current_method='Custom Equation'):\r\n \"\"\"Initialize the method groupBox in the tab\"\"\"\r\n gbox.setLayout(QtWidgets.QGridLayout())\r\n if method_list == 'Method 1': # 3D data\r\n method_set_list = ['Custom Equation', 'Interpolant', 'Lowess', 'Polynomial']\r\n else: # 'Method 2', # 2D data\r\n method_set_list = ['Custom Equation', 'Exponential', 'Fourier', 'Gaussian', 'Interpolant',\r\n 'Linear Fitting', 'Polynomial', 'Power', 'Rational', 'Smoothing Spline',\r\n 'Sum of Sine', 'Weibull']\r\n method_comboBox = QtWidgets.QComboBox()\r\n method_comboBox.setObjectName(method_list)\r\n method_comboBox.addItems(method_set_list)\r\n method_comboBox.setCurrentIndex(method_set_list.index(current_method))\r\n method_comboBox.currentIndexChanged.connect(lambda: self.toggleMethods(method_comboBox.currentText()))\r\n gbox.layout().addWidget(method_comboBox, 0, 0, 1, 8)\r\n self.toggleMethods(method=current_method)\r\n\r\n def setMethodLists(self, method_comboBox, methodList=1, currentMethod=None):\r\n \"\"\"Called when data type changed\"\"\"\r\n # Block the signal during reset\r\n method_comboBox.blockSignals(True)\r\n method_comboBox.clear()\r\n\r\n if methodList == 1: # 3D data\r\n method_comboBox.setObjectName(\"Method 1\")\r\n method_list = ['Custom Equation', 'Interpolant', 'Lowess', 'Polynomial']\r\n currentMethod = 'Custom Equation' if currentMethod is None else currentMethod\r\n else: # 2D data\r\n method_comboBox.setObjectName(\"Method 2\")\r\n method_list = ['Custom Equation', 'Exponential', 'Fourier', 'Gaussian', 'Interpolant',\r\n 'Linear Fitting', 'Polynomial', 'Power', 'Rational', 'Smoothing Spline',\r\n 'Sum of Sine', 'Weibull']\r\n currentMethod = 'Exponential' if currentMethod is None else currentMethod\r\n # Adding method list\r\n method_comboBox.addItems(method_list)\r\n # re-enable the signal\r\n method_comboBox.blockSignals(False)\r\n # Reset method\r\n try:\r\n current_index = method_list.index(currentMethod)\r\n method_comboBox.setCurrentIndex(current_index)\r\n except:\r\n raise(ValueError('Cannot set method \"{}\", which is not in the list of valid methods'.format(currentMethod)))\r\n\r\n def toggleMethods(self, method, block_signal=False):\r\n gbox = self.method_groupBox\r\n method_comboBox = gbox.layout().itemAt(0).widget()\r\n if block_signal: method_comboBox.blockSignals(True)\r\n # Get the setting table\r\n if method_comboBox.objectName() == 'Method 1':\r\n methods_layout, methods_dict = self.methodSet1(method=method)\r\n else:\r\n methods_layout, methods_dict = self.methodSet2(method=method)\r\n # Remove everything at and below the setting rows: rigid setting\r\n gbox = self.removeFromWidget(gbox, row=1)\r\n\r\n for key, val in methods_layout.items():\r\n gbox.layout().addWidget(val, *key[0], *key[1]) # widget, (x, y), (w, l)\r\n\r\n self.methods = methods_dict\r\n\r\n if self.autofit and hasattr(self, 'result_textBox') and self.result_textBox.toPlainText():\r\n print('redo fitting upon method change')\r\n self.curveFit() # redo the fitting\r\n\r\n if block_signal: method_comboBox.blockSignals(False)\r\n\r\n def removeFromWidget(self, widgetFrame, row=0):\r\n \"\"\"Remove widgets from a widgetFrame below row\"\"\"\r\n nrows = widgetFrame.layout().rowCount()\r\n if nrows > row:\r\n for r in range(row, nrows):\r\n for col in range(widgetFrame.layout().columnCount()):\r\n currentItem = widgetFrame.layout().itemAtPosition(r, col)\r\n if currentItem is not None:\r\n currentItem.widget().deleteLater()\r\n widgetFrame.layout().removeItem(currentItem)\r\n #widgetFrame.layout().removeWidget(currentItem.widget())\r\n return widgetFrame\r\n\r\n def clearWidget(self, widgetFrame):\r\n \"\"\"clear everything from a widget\"\"\"\r\n for i in reversed(range(widgetFrame.layout().count())):\r\n widgetFrame.layout().itemAt(i).widget().setParent(None)\r\n\r\n def methodSet1(self, method='Interpolant'):\r\n \"\"\"For 3D data\"\"\"\r\n methods_layout, methods_dict = {}, {'dim': '3D', 'method': method}\r\n if method == \"Custom Equation\":\r\n x_lineEdit = QtWidgets.QLineEdit(\"x\")\r\n y_lineEdit = QtWidgets.QLineEdit(\"y\")\r\n z_lineEdit = QtWidgets.QLineEdit(\"z\")\r\n eqs_textEdit = QCodeEdit(\"\"\"a + b * np.sin(m * pi * x * y)\\n + c * np.exp(-(w * y)**2)\"\"\")\r\n eqs_textEdit.setMaximumHeight(50)\r\n f_label = QtWidgets.QLabel(\"f(\")\r\n endp_label = QtWidgets.QLabel(\")\")\r\n eql_label = QtWidgets.QLabel(\"=\")\r\n eql_label2= QtWidgets.QLabel(\"=\")\r\n comma_label = QtWidgets.QLabel(\",\")\r\n methods_layout[(1, 0), (1, 1)] = z_lineEdit\r\n methods_layout[(1, 1), (1, 1)] = eql_label\r\n methods_layout[(1, 2), (1, 1)] = f_label\r\n methods_layout[(1, 3), (1, 1)] = x_lineEdit\r\n methods_layout[(1, 4), (1, 1)] = comma_label\r\n methods_layout[(1, 5), (1, 1)] = y_lineEdit\r\n methods_layout[(1, 6), (1, 1)] = endp_label\r\n methods_layout[(2, 1), (1, 1)] = eql_label2\r\n methods_layout[(2, 2), (1, 6)] = eqs_textEdit\r\n # raise(NotImplementedError(\"Custom Equation currently not supported\"))\r\n elif method == \"Interpolant\":\r\n type_label = QtWidgets.QLabel(\"Method:\")\r\n type_comboBox = QtWidgets.QComboBox()\r\n type_comboBox.addItems([\"Nearest Neighbor\", \"Linear\", \"Cubic\", \"Biharmonic\", \"Thin-plate Spline\"])\r\n type_comboBox.setCurrentIndex(1)\r\n centerScale_checkBox = QtWidgets.QCheckBox(\"Center and scale\")\r\n centerScale_checkBox.setCheckState(self.centerscale)\r\n methods_layout[(1, 0), (1, 1)] = type_label\r\n methods_layout[(1, 1), (1, 7)] = type_comboBox\r\n methods_layout[(2, 0), (1, 3)] = centerScale_checkBox\r\n elif method == \"Lowess\":\r\n poly_label = QtWidgets.QLabel(\"Polynomial:\")\r\n poly_comboBox = QtWidgets.QComboBox()\r\n poly_comboBox.addItems([\"Linear\", \"Quadratic\"])\r\n span_label = QtWidgets.QLabel(\"Span:\")\r\n span_lineEdit = QtWidgets.QLineEdit(\"25\")\r\n span_pecent_label = QtWidgets.QLabel(\"%\")\r\n robust_label = QtWidgets.QLabel(\"Robust:\")\r\n robust_comboBox = QtWidgets.QComboBox()\r\n robust_comboBox.addItems([\"Off\", \"LAR\", \"Bisquare\"])\r\n centerScale_checkBox = QtWidgets.QCheckBox(\"Center and scale\")\r\n centerScale_checkBox.setCheckState(self.centerscale)\r\n methods_layout[(1, 0), (1, 1)] = poly_label\r\n methods_layout[(1, 1), (1, 7)] = poly_comboBox\r\n methods_layout[(2, 0), (1, 1)] = span_label\r\n methods_layout[(2, 1), (1, 6)] = span_lineEdit\r\n methods_layout[(2, 7), (1, 1)] = span_pecent_label\r\n methods_layout[(3, 0), (1, 1)] = robust_label\r\n methods_layout[(3, 1), (1, 7)] = robust_comboBox\r\n methods_layout[(4, 0), (1, 3)] = centerScale_checkBox\r\n elif method == \"Polynomial\":\r\n deg_label = QtWidgets.QLabel(\"Degrees:\")\r\n x_label = QtWidgets.QLabel(\"x:\")\r\n x_label.setFixedWidth(8)\r\n x_comboBox = QtWidgets.QComboBox()\r\n x_comboBox.addItems([\"1\", \"2\", \"3\", \"4\", \"5\"])\r\n y_label = QtWidgets.QLabel(\"y:\")\r\n y_label.setFixedWidth(8)\r\n y_comboBox = QtWidgets.QComboBox()\r\n y_comboBox.addItems([\"1\", \"2\", \"3\", \"4\", \"5\"])\r\n robust_label = QtWidgets.QLabel(\"Robust:\")\r\n robust_comboBox = QtWidgets.QComboBox()\r\n robust_comboBox.addItems([\"Off\", \"LAR\", \"Bisquare\"])\r\n centerScale_checkBox = QtWidgets.QCheckBox(\"Center and scale\")\r\n centerScale_checkBox.setCheckState(self.centerscale)\r\n methods_layout[(1, 0), (1, 2)] = deg_label\r\n methods_layout[(1, 2), (1, 1)] = x_label\r\n methods_layout[(1, 3), (1, 2)] = x_comboBox\r\n methods_layout[(1, 5), (1, 1)] = y_label\r\n methods_layout[(1, 6), (1, 2)] = y_comboBox\r\n methods_layout[(2, 0), (1, 2)] = robust_label\r\n methods_layout[(2, 2), (1, 6)] = robust_comboBox\r\n methods_layout[(3, 0), (1, 6)] = centerScale_checkBox\r\n else:\r\n raise(NotImplementedError(\"Unrecognized method: {}\".format(method)))\r\n\r\n return methods_layout, methods_dict\r\n\r\n def methodSet2(self, method=\"Polynomial\"):\r\n \"\"\"For 2D data\"\"\"\r\n def on_centerscale_changed(isChecked):\r\n self.centerscale = isChecked\r\n self.curveFit() # reissue curve fitting\r\n\r\n def on_numterms_changed(index, eqs_label, text_dict, terms_dict):\r\n eqs_label.setText(text_dict.get(index))\r\n # Change the initialization parameters\r\n self.options.setInitializationParameters(terms_dict.get(index))\r\n self.curveFit() # reissue curve fitting\r\n\r\n def on_rational_deg_spinbox_changed(p, q):\r\n terms_list = self.list_rational_terms(p, q)\r\n self.options.setInitializationParameters(terms_list)\r\n self.curveFit()\r\n\r\n methods_layout, methods_dict = {}, {'dim': '2D', 'method': method}\r\n if method == \"Custom Equation\":\r\n x_lineEdit = QtWidgets.QLineEdit(\"x\")\r\n x_lineEdit.setMaximumWidth(30)\r\n y_lineEdit = QtWidgets.QLineEdit(\"y\")\r\n y_lineEdit.setMaximumWidth(30)\r\n eqs_textEdit = QCodeEdit(\"\"\"a*np.exp(-b*x)+c\"\"\")\r\n eqs_textEdit.setMaximumHeight(50)\r\n f_label = QtWidgets.QLabel(\"f(\")\r\n f_label.setMaximumWidth(20)\r\n endp_label = QtWidgets.QLabel(\")\")\r\n eql_label = QtWidgets.QLabel(\"=\")\r\n eql_label.setMaximumWidth(20)\r\n eql_label2 = QtWidgets.QLabel(\"=\")\r\n eql_label.setMaximumWidth(20)\r\n self.options.setMethod(method='2D: curve_fit')\r\n fitopt_Button = QtWidgets.QPushButton(\"Fit Options...\")\r\n fitopt_Button.clicked.connect(lambda: self.options.show())\r\n methods_layout[(1, 0), (1, 1)] = y_lineEdit\r\n methods_layout[(1, 1), (1, 1)] = eql_label\r\n methods_layout[(1, 2), (1, 1)] = f_label\r\n methods_layout[(1, 3), (1, 1)] = x_lineEdit\r\n methods_layout[(1, 4), (1, 1)] = endp_label\r\n methods_layout[(2, 1), (1, 1)] = eql_label2\r\n methods_layout[(2, 2), (1, 6)] = eqs_textEdit\r\n methods_layout[(3, 7), (1, 1)] = fitopt_Button\r\n #raise (NotImplementedError(\"Custom Equation currently not supported\"))\r\n elif method == \"Exponential\":\r\n eqs_eqs_label = QtWidgets.QLabel(\"Equation:\")\r\n eqs_label = ElideQLabel(\"a*exp(b*x)\")\r\n eqs_label.setMinimumWidth(200)\r\n numterms_label = QtWidgets.QLabel(\"Number of parameters:\")\r\n numterms_comboBox = QtWidgets.QComboBox()\r\n numterms_comboBox.addItems([\"2: a*exp(b*x)\", \"3: a*exp(b*x)+c\", \"4: a*exp(b*x) + c*exp(d*x)\"])\r\n numterms_comboBox.currentIndexChanged.connect(lambda index: on_numterms_changed(index, eqs_label, \\\r\n {0: \"a*exp(b*x)\", 1: \"a*exp(b*x)+c\",2: \"a*exp(b*x) + c*exp(d*x)\"}, \\\r\n {0:['a','b'],1:['a','b','c'], 2:['a','b','c', 'd']}))\r\n centerScale_checkBox = QtWidgets.QCheckBox(\"Center and scale\")\r\n centerScale_checkBox.setCheckState(self.centerscale)\r\n centerScale_checkBox.stateChanged.connect(lambda isChecked: on_centerscale_changed(isChecked))\r\n self.options.setMethod(method='2D: curve_fit')\r\n self.options.setInitializationParameters(coefficients=['a','b'])\r\n fitopt_Button = QtWidgets.QPushButton(\"Fit Options...\")\r\n fitopt_Button.clicked.connect(lambda: self.options.show())\r\n methods_layout[(1, 0), (1, 1)] = numterms_label\r\n methods_layout[(1, 1), (1, 7)] = numterms_comboBox\r\n methods_layout[(2, 0), (1, 1)] = eqs_eqs_label\r\n methods_layout[(2, 1), (1, 7)] = eqs_label\r\n methods_layout[(3, 0), (1, 7)] = centerScale_checkBox\r\n methods_layout[(4, 7), (1, 1)] = fitopt_Button\r\n methods_dict.update({'terms': eqs_label, 'center_and_scale': centerScale_checkBox})\r\n elif method == \"Fourier\": # TODO Mixture of Fourier\r\n eqs_eqs_label = QtWidgets.QLabel(\"Equation:\")\r\n eqs_label = ElideQLabel(\"a0 + a1*cos(x*w) + b1*sin(x*w)\")\r\n eqs_label.setMinimumWidth(200)\r\n numterms_label = QtWidgets.QLabel(\"Number of parameters:\")\r\n numterms_comboBox = QtWidgets.QComboBox()\r\n numterms_comboBox.addItems([\"1\", \"2\", \"3\", \"4\", \"5\", \"6\", \"7\", \"8\"])\r\n numterms_comboBox.currentIndexChanged.connect(lambda index:\r\n eqs_label.setText({0: \"a0 + a1*cos(x*w) + b1*sin(x*w)\",\r\n 1: \"a0 + a1*cos(x*w) + b1*sin(x*w) + a2*cos(2*x*w) + b2*sin(2*x*w)\",\r\n 2: \"a0 + a1*cos(x*w) + b1*sin(x*w) + ... + a3*cos(3*x*w) + b3*sin(3*x*w)\",\r\n 3: \"a0 + a1*cos(x*w) + b1*sin(x*w) + ... + a4*cos(4*x*w) + b4*sin(4*x*w)\",\r\n 4: \"a0 + a1*cos(x*w) + b1*sin(x*w) + ... + a5*cos(5*x*w) + b5*sin(5*x*w)\",\r\n 5: \"a0 + a1*cos(x*w) + b1*sin(x*w) + ... + a6*cos(6*x*w) + b6*sin(6*x*w)\",\r\n 6: \"a0 + a1*cos(x*w) + b1*sin(x*w) + ... + a7*cos(7*x*w) + b7*sin(7*x*w)\",\r\n 7: \"a0 + a1*cos(x*w) + b1*sin(x*w) + ... + a8*cos(8*x*w) + b8*sin(8*x*w)\"\r\n }.get(index)))\r\n centerScale_checkBox = QtWidgets.QCheckBox(\"Center and scale\")\r\n centerScale_checkBox.setCheckState(self.centerscale)\r\n fitopt_Button = QtWidgets.QPushButton(\"Fit Options...\")\r\n methods_layout[(1, 0), (1, 1)] = numterms_label\r\n methods_layout[(1, 1), (1, 7)] = numterms_comboBox\r\n methods_layout[(2, 0), (1, 1)] = eqs_eqs_label\r\n methods_layout[(2, 1), (1, 7)] = eqs_label\r\n methods_layout[(3, 0), (1, 7)] = centerScale_checkBox\r\n methods_layout[(4, 7), (1, 1)] = fitopt_Button\r\n elif method == \"Gaussian\": # mixture of Gaussian\r\n eqs_eqs_label = QtWidgets.QLabel(\"Equation:\")\r\n eqs_label = ElideQLabel(\"a1*exp(-((x-b1)/c1)^2\")\r\n eqs_label.setMinimumWidth(200)\r\n numterms_label = QtWidgets.QLabel(\"Number of parameters:\")\r\n numterms_comboBox = QtWidgets.QComboBox()\r\n numterms_comboBox.addItems([\"1\", \"2\", \"3\", \"4\", \"5\", \"6\", \"7\", \"8\"])\r\n numterms_comboBox.currentIndexChanged.connect(lambda index:\r\n eqs_label.setText({0: \"a1*exp(-((x-b1)/c1)^2\",\r\n 1: \"a1*exp(-((x-b1)/c1)^2 + a2*exp(-((x-b2)/c2)^2\",\r\n 2: \"a1*exp(-((x-b1)/c1)^2 + ... + a3*exp(-((x-b3)/c3)^2\",\r\n 3: \"a1*exp(-((x-b1)/c1)^2 + ... + a4*exp(-((x-b4)/c4)^2\",\r\n 4: \"a1*exp(-((x-b1)/c1)^2 + ... + a5*exp(-((x-b5)/c5)^2\",\r\n 5: \"a1*exp(-((x-b1)/c1)^2 + ... + a6*exp(-((x-b6)/c6)^2\",\r\n 6: \"a1*exp(-((x-b1)/c1)^2 + ... + a7*exp(-((x-b7)/c7)^2\",\r\n 7: \"a1*exp(-((x-b1)/c1)^2 + ... + a8*exp(-((x-b8)/c8)^2\",\r\n }.get(index)))\r\n centerScale_checkBox = QtWidgets.QCheckBox(\"Center and scale\")\r\n centerScale_checkBox.setCheckState(self.centerscale)\r\n fitopt_Button = QtWidgets.QPushButton(\"Fit Options...\")\r\n methods_layout[(1, 0), (1, 1)] = numterms_label\r\n methods_layout[(1, 1), (1, 7)] = numterms_comboBox\r\n methods_layout[(2, 0), (1, 1)] = eqs_eqs_label\r\n methods_layout[(2, 1), (1, 7)] = eqs_label\r\n methods_layout[(3, 0), (1, 7)] = centerScale_checkBox\r\n methods_layout[(4, 7), (1, 1)] = fitopt_Button\r\n elif method == \"Interpolant\":\r\n type_label = QtWidgets.QLabel(\"Method:\")\r\n type_comboBox = QtWidgets.QComboBox()\r\n type_comboBox.addItems([\"Nearest Neighbor\", \"Linear\", \"Cubic\", \"Shape-preserving (PCHIP)\"])\r\n type_comboBox.setCurrentIndex(1)\r\n centerScale_checkBox = QtWidgets.QCheckBox(\"Center and scale\")\r\n centerScale_checkBox.setCheckState(self.centerscale)\r\n methods_layout[(1, 0), (1, 1)] = type_label\r\n methods_layout[(1, 1), (1, 7)] = type_comboBox\r\n methods_layout[(2, 0), (1, 3)] = centerScale_checkBox\r\n elif method == \"Linear Fitting\":\r\n x_lineEdit = QtWidgets.QLineEdit(\"x\")\r\n x_lineEdit.setMaximumWidth(30)\r\n y_lineEdit = QtWidgets.QLineEdit(\"y\")\r\n y_lineEdit.setMaximumWidth(30)\r\n eqs_textEdit = QCodeEdit(\"\"\"a*(sin(x-pi))+b*((x-10)^2)+c*(1)\"\"\")\r\n f_label = QtWidgets.QLabel(\"f(\")\r\n f_label.setMaximumWidth(20)\r\n endp_label = QtWidgets.QLabel(\")\")\r\n eql_label = QtWidgets.QLabel(\"=\")\r\n eql_label.setMaximumWidth(20)\r\n eql_label2 = QtWidgets.QLabel(\"=\")\r\n eql_label.setMaximumWidth(20)\r\n edit_Button = QtWidgets.QPushButton(\"Edit\") # pops up a window to edit a list of equations, linearly summed together\r\n fitopt_Button = QtWidgets.QPushButton(\"Fit Options...\")\r\n methods_layout[(1, 0), (1, 1)] = y_lineEdit\r\n methods_layout[(1, 1), (1, 1)] = eql_label\r\n methods_layout[(1, 2), (1, 1)] = f_label\r\n methods_layout[(1, 3), (1, 1)] = x_lineEdit\r\n methods_layout[(1, 4), (1, 1)] = endp_label\r\n methods_layout[(2, 1), (1, 1)] = eql_label2\r\n methods_layout[(2, 2), (1, 6)] = eqs_textEdit\r\n methods_layout[(3, 6), (1, 1)] = edit_Button\r\n methods_layout[(3, 7), (1, 1)] = fitopt_Button\r\n elif method == \"Polynomial\":\r\n deg_label = QtWidgets.QLabel(\"Degree:\")\r\n deg_spinBox = QtWidgets.QSpinBox()\r\n deg_spinBox.setValue(1)\r\n deg_spinBox.setMinimum(1)\r\n deg_spinBox.valueChanged.connect(lambda: self.curveFit())\r\n centerScale_checkBox = QtWidgets.QCheckBox(\"Center and scale\")\r\n centerScale_checkBox.setCheckState(self.centerscale)\r\n methods_layout[(1, 0), (1, 2)] = deg_label\r\n methods_layout[(1, 2), (1, 6)] = deg_spinBox\r\n methods_layout[(2, 0), (1, 6)] = centerScale_checkBox\r\n methods_dict.update({'degree': deg_spinBox})\r\n # Done\r\n elif method == \"Power\":\r\n eqs_eqs_label = QtWidgets.QLabel(\"Equation:\")\r\n eqs_label = QtWidgets.QLabel(\"a*x^b\")\r\n numterms_label = QtWidgets.QLabel(\"Number of terms:\")\r\n numterms_comboBox = QtWidgets.QComboBox()\r\n numterms_comboBox.addItems([\"1\", \"2\"])\r\n numterms_comboBox.currentIndexChanged.connect(lambda index: on_numterms_changed(index, eqs_label, \\\r\n {0: \"a*x^b\", 1: \"a*x^b+c\"}, \\\r\n {0: [\"a\", \"b\"], 1: [\"a\", \"b\", \"c\"]}))\r\n self.options.setMethod(method='2D: curve_fit')\r\n self.options.setInitializationParameters(coefficients=['a', 'b'])\r\n fitopt_Button = QtWidgets.QPushButton(\"Fit Options...\")\r\n fitopt_Button.clicked.connect(lambda: self.options.show())\r\n methods_layout[(1, 0), (1, 1)] = numterms_label\r\n methods_layout[(1, 1), (1, 7)] = numterms_comboBox\r\n methods_layout[(2, 0), (1, 1)] = eqs_eqs_label\r\n methods_layout[(2, 1), (1, 6)] = eqs_label\r\n methods_layout[(3, 7), (1, 1)] = fitopt_Button\r\n methods_dict.update({\"terms\": eqs_label})\r\n elif method == \"Rational\":\r\n numdeg_label = QtWidgets.QLabel(\"Numerator Degree:\")\r\n numdeg_spinBox = QtWidgets.QSpinBox()\r\n numdeg_spinBox.setMinimum(0)\r\n numdeg_spinBox.setValue(0)\r\n dendeg_label = QtWidgets.QLabel(\"Denominator Degree:\")\r\n dendeg_spinBox = QtWidgets.QSpinBox()\r\n dendeg_spinBox.setValue(1)\r\n dendeg_spinBox.setMinimum(1)\r\n numdeg_spinBox.valueChanged.connect(lambda: on_rational_deg_spinbox_changed(numdeg_spinBox.value(), dendeg_spinBox.value()))\r\n dendeg_spinBox.valueChanged.connect(lambda: on_rational_deg_spinbox_changed(numdeg_spinBox.value(), dendeg_spinBox.value()))\r\n centerScale_checkBox = QtWidgets.QCheckBox(\"Center and scale\")\r\n centerScale_checkBox.setCheckState(self.centerscale)\r\n self.options.setMethod(method='2D: curve_fit')\r\n self.options.setInitializationParameters(coefficients=['p0', 'q0'])\r\n fitopt_Button = QtWidgets.QPushButton(\"Fit Options...\")\r\n fitopt_Button.clicked.connect(lambda: self.options.show())\r\n methods_layout[(1, 0), (1, 2)] = numdeg_label\r\n methods_layout[(1, 2), (1, 6)] = numdeg_spinBox\r\n methods_layout[(2, 0), (1, 2)] = dendeg_label\r\n methods_layout[(2, 2), (1, 6)] = dendeg_spinBox\r\n methods_layout[(3, 0), (1, 6)] = centerScale_checkBox\r\n methods_layout[(4, 7), (1, 1)] = fitopt_Button\r\n methods_dict.update({'numdeg': numdeg_spinBox, 'dendeg': dendeg_spinBox, 'center_and_scale': centerScale_checkBox})\r\n elif method == \"Smoothing Spline\":\r\n smooth_param_label = QtWidgets.QLabel(\"Smoothing Parameter\")\r\n default_radioButton = QtWidgets.QRadioButton(\"Default\")\r\n specify_radioButton = QtWidgets.QRadioButton(\"Specify:\")\r\n dec_button = QtWidgets.QPushButton(\"<\")\r\n dec_button.setToolTip(\"Smoother\")\r\n inc_button = QtWidgets.QPushButton(\">\")\r\n inc_button.setToolTip(\"Rougher\")\r\n specify_lineEdit = QtWidgets.QLineEdit(\"0.999888\")\r\n specify_lineEdit.setMinimumWidth(100)\r\n centerScale_checkBox = QtWidgets.QCheckBox(\"Center and scale\")\r\n centerScale_checkBox.setCheckState(self.centerscale)\r\n methods_layout[(1, 0), (1, 6)] = smooth_param_label\r\n methods_layout[(2, 0), (1, 6)] = default_radioButton\r\n methods_layout[(3, 0), (1, 1)] = specify_radioButton\r\n methods_layout[(3, 1), (1, 1)] = dec_button\r\n methods_layout[(3, 2), (1, 5)] = specify_lineEdit\r\n methods_layout[(3, 7), (1, 1)] = inc_button\r\n methods_layout[(4, 0), (1, 6)] = centerScale_checkBox\r\n elif method == \"Sum of Sine\":\r\n numterms_label = QtWidgets.QLabel(\"Equation:\")\r\n numterms_comboBox = QtWidgets.QComboBox()\r\n numterms_comboBox.addItems([\"1\", \"2\", \"3\", \"4\", \"5\", \"6\", \"7\", \"8\"])\r\n eqs_eqs_label = QtWidgets.QLabel(\"Equation:\")\r\n eqs_label = ElideQLabel(\"a1*sin(b1*x+c1)\")\r\n eqs_label.setMinimumWidth(200)\r\n numterms_comboBox.currentIndexChanged.connect(lambda index:\r\n eqs_label.setText({0: \"a1*sin(b1*x+c1)\",\r\n 1: \"a1*sin(b1*x+c1) + a2*sin(b2*x+c2)\",\r\n 2: \"a1*sin(b1*x+c1) + ... + a3*sin(b3*x+c3)\",\r\n 3: \"a1*sin(b1*x+c1) + ... + a4*sin(b4*x+c3)\",\r\n 4: \"a1*sin(b1*x+c1) + ... + a5*sin(b5*x+c3)\",\r\n 5: \"a1*sin(b1*x+c1) + ... + a6*sin(b6*x+c3)\",\r\n 6: \"a1*sin(b1*x+c1) + ... + a7*sin(b7*x+c3)\",\r\n 7: \"a1*sin(b1*x+c1) + ... + a8*sin(b8*x+c3)\",\r\n }.get(index)))\r\n centerScale_checkBox = QtWidgets.QCheckBox(\"Center and scale\")\r\n centerScale_checkBox.setCheckState(self.centerscale)\r\n fitopt_Button = QtWidgets.QPushButton(\"Fit Options...\")\r\n methods_layout[(1, 0), (1, 1)] = numterms_label\r\n methods_layout[(1, 1), (1, 7)] = numterms_comboBox\r\n methods_layout[(2, 0), (1, 1)] = eqs_eqs_label\r\n methods_layout[(2, 1), (1, 7)] = eqs_label\r\n methods_layout[(3, 0), (1, 7)] = centerScale_checkBox\r\n methods_layout[(4, 7), (1, 1)] = fitopt_Button\r\n elif method == \"Weibull\":\r\n eqs_label_label = QtWidgets.QLabel(\"Equation:\")\r\n eqs_label = QtWidgets.QLabel(\"a*b*x^(b-1)*exp(-a*x^b)\")\r\n fitopt_Button = QtWidgets.QPushButton(\"Fit Options...\")\r\n fitopt_Button.clicked.connect(lambda: self.options.show())\r\n self.options.setMethod(method='2D: curve_fit')\r\n self.options.setInitializationParameters(coefficients=['a','b'])\r\n methods_layout[(1, 0), (1, 1)] = eqs_label_label\r\n methods_layout[(1, 1), (1, 1)] = eqs_label\r\n methods_layout[(2, 7), (1, 1)] = fitopt_Button\r\n methods_dict.update({'terms': eqs_label})\r\n else:\r\n # TODO: Poisson, Logarithmic, Logistic/Sigmoid\r\n raise(NotImplementedError(\"Unrecognized method: {}\".format(method)))\r\n return methods_layout, methods_dict\r\n\r\n def initialize_autofit(self, gbox):\r\n \"\"\"Initialize the autofit groupBox in the tab\"\"\"\r\n gbox.setLayout(QtWidgets.QVBoxLayout())\r\n autofit_checkBox = QtWidgets.QCheckBox(\"Auto fit\")\r\n autofit_checkBox.setCheckState(self.autofit)\r\n fit_pushButton = QtWidgets.QPushButton(\"Fit\")\r\n fit_pushButton.clicked.connect(lambda: self.curveFit())\r\n fit_pushButton.setEnabled(False)\r\n stop_pushButton = QtWidgets.QPushButton(\"Stop\")\r\n stop_pushButton.clicked.connect(lambda: self.onStopButtonClicked())\r\n stop_pushButton.setEnabled(False)\r\n autofit_checkBox.stateChanged.connect(lambda checked: self.onAutoFitCheckBoxToggled(checked, fit_pushButton, stop_pushButton))\r\n\r\n gbox.layout().addWidget(autofit_checkBox)\r\n gbox.layout().addWidget(fit_pushButton)\r\n gbox.layout().addWidget(stop_pushButton)\r\n\r\n def onAutoFitCheckBoxToggled(self, checked, fit_pushButton, stop_pushButton):\r\n self.autofit = checked\r\n if checked:\r\n fit_pushButton.setEnabled(False)\r\n stop_pushButton.setEnabled(False)\r\n else:\r\n fit_pushButton.setEnabled(True)\r\n stop_pushButton.setEnabled(True)\r\n\r\n def onStopButtonClicked(self):\r\n pass\r\n\r\n def initialize_results(self, gbox):\r\n \"\"\"Initialize the results groupBox in the tab\"\"\"\r\n gbox.setLayout(QtWidgets.QGridLayout())\r\n self.result_textBox = QtWidgets.QTextEdit()\r\n self.result_textBox.setReadOnly(True)\r\n self.result_textBox.setLineWrapMode(0)\r\n gbox.layout().addWidget(self.result_textBox)\r\n\r\n def initialize_display(self, gbox, method='2D'):\r\n \"\"\"Initialize the display groupBox in the tab\"\"\"\r\n method_comboBox = self.method_groupBox.layout().itemAt(0).widget()\r\n if method == '2D':\r\n if not isinstance(self.graphicsView, pg.graphicsItems.PlotItem.PlotItem):\r\n self.graphicsView = GraphicsLayoutWidget()\r\n self.graphicsView.setObjectName('2D')\r\n self.graphicsView.setBackground('w')\r\n self.graphicsView.addPlot(row=0, col=0) # add a plot item\r\n self.setMethodLists(method_comboBox, methodList=2)\r\n else: # 3D\r\n if not isinstance(self.graphicsView, pg.opengl.GLViewWidget):\r\n self.graphicsView = gl.GLViewWidget()\r\n self.graphicsView.setObjectName('3D')\r\n #self.graphicsView.setBackgroundColor('k')\r\n self.setMethodLists(method_comboBox, methodList=1)\r\n\r\n self.graphicsView.setObjectName(_fromUtf8(\"graphicsView\"))\r\n if gbox.layout() is None:\r\n gbox.setLayout(QtWidgets.QGridLayout())\r\n self.clearWidget(gbox) # remove other widgets\r\n gbox.layout().addWidget(self.graphicsView)\r\n\r\n def graphData(self, data_list):\r\n \"\"\"Scatter plots of data points\"\"\"\r\n if set(data_list) == set(['xdata']): # 1D data\r\n s1 = pg.ScatterPlotItem(size=5)\r\n s1.addPoints(x=np.arange(0, len(self.xdata)), y=self.xdata, pen='k', brush='k')\r\n self.initialize_display(self.display_groupBox, method='2D')\r\n p = self.graphicsView.getItem(row=0, col=0)\r\n p.clear()\r\n p.showGrid(x=True, y=True)\r\n p.setLabels(left='{}'.format(self.varnames['xdata']), bottom='Index')\r\n p.addItem(s1)\r\n p.autoRange()\r\n elif set(data_list) == set(['xdata', 'ydata']): # 2D data\r\n s1 = pg.ScatterPlotItem(size=5)\r\n s1.addPoints(x=self.xdata, y=self.ydata, pen='k', brush='k')\r\n self.initialize_display(self.display_groupBox, method='2D')\r\n p = self.graphicsView.getItem(row=0, col=0)\r\n p.clear()\r\n p.showGrid(x=True, y=True)\r\n p.setLabels(left='{}'.format(self.varnames['ydata']), bottom='{}'.format(self.varnames['xdata']))\r\n p.addItem(s1)\r\n p.autoRange()\r\n elif set(self.availableData) == set(['xdata', 'ydata', 'wdata']): # 2D data\r\n s1 = pg.ScatterPlotItem(pxMode=False) ## Set pxMode=False to allow spots to transform with the view\r\n spots = []\r\n for xx, yy, ww in zip(self.xdata, self.ydata, self.wdata):\r\n spots.append({'pos': (xx, yy), 'size': 5 * ww, 'pen': {'color': None},\r\n 'brush': (0, 0, 0, 120)})\r\n s1.addPoints(spots)\r\n self.initialize_display(self.display_groupBox, method='2D')\r\n p = self.graphicsView.getItem(row=0, col=0)\r\n p.clear()\r\n p.showGrid(x=True, y=True)\r\n p.setLabels(left='{}'.format(self.varnames['ydata']), bottom='{}'.format(self.varnames['xdata']))\r\n p.addItem(s1)\r\n p.autoRange()\r\n elif set(self.availableData) == set(['xdata', 'ydata', 'zdata']): # 3D data\r\n # Set the graphics view to 3D\r\n spots = gl.GLScatterPlotItem(pos=np.c_[self.xdata, self.ydata, self.zdata],\r\n color=np.tile([1, 1, 1, 0.5], (len(self.xdata), 1)),\r\n size=5*np.ones(len(self.xdata)))\r\n self.initialize_display(self.display_groupBox, method='3D')\r\n # Remove everything\r\n for item in self.graphicsView.items:\r\n item._setView(None)\r\n self.graphicsView.items = []\r\n self.graphicsView.update()\r\n self.graphicsView.addItem(spots)\r\n self.graphicsView.addItem(gl.GLGridItem())\r\n elif set(self.availableData) == set(['xdata', 'ydata', 'zdata', 'wdata']): # 3D data\r\n spots = gl.GLScatterPlotItem(pos=np.c_[self.xdata, self.ydata, self.zdata],\r\n color=np.tile([1, 1, 1, 0.5], (len(self.xdata), 1)),\r\n size=5*self.wdata)\r\n self.initialize_display(self.display_groupBox, method='3D')\r\n for item in self.graphicsView.items:\r\n item._setView(None)\r\n self.graphicsView.items = []\r\n self.graphicsView.update()\r\n self.graphicsView.addItem(spots)\r\n self.graphicsView.addItem(gl.GLGridItem())\r\n else:\r\n return # do nothing. Data not expected\r\n\r\n def graphFit(self, f0=None, popt=None):\r\n \"\"\"Plot the fitted function\"\"\"\r\n if self.params['dim'] == '2D':\r\n p = self.graphicsView.getItem(row=0, col=0)\r\n # check if a fit already exist. If so, remove it\r\n for k, a in enumerate(p.listDataItems()):\r\n if a.name() == 'fit': # matching\r\n # Remove the trace\r\n p.removeItem(a)\r\n\r\n if f0 is None or isinstance(popt, Exception): return\r\n if self.params['dim'] == '2D':\r\n p_xrange, p_yrange = p.viewRange()\r\n x0_fit = np.linspace(p_xrange[0], p_xrange[1], 100)\r\n y0_fit = f0(x0_fit, *popt)\r\n # Replot the fit\r\n cl = p.plot(x=x0_fit, y=y0_fit, pen='r', name='fit')\r\n # Make sure to set the original view back\r\n p.setXRange(p_xrange[0], p_xrange[1], padding=0)\r\n p.setYRange(p_yrange[0], p_yrange[1], padding=0)\r\n else: # 3D\r\n self.graphicsView # 3D GL\r\n\r\n def outputResultText(self, model):\r\n if 'final_text' in model.keys():\r\n self.result_textBox.setText(model['final_text'])\r\n return\r\n\r\n final_text = \"\"\"\r\n {}:\r\n f(x) = {}\r\n \"\"\".format(model['type'], model['formula'])\r\n\r\n # Coefficients\r\n coef_format = \" {} = {:.3g} ({:.3g}, {:.3g})\\n \" # var = mean (lower, upper)\r\n coef_text = \"\"\"\r\n Coefficeints \r\n (95% confidence interval):\r\n \"\"\"\r\n for n, kk in enumerate(model['ci']):\r\n coef_text = coef_text + coef_format.format(kk['name'], kk['mean'], kk['lower'], kk['upper'])\r\n final_text = final_text + coef_text\r\n\r\n final_text = final_text + \"\"\"\r\n Goodness of fit:\r\n SSE: {:.5f}\r\n RMSE: {:.5f}\r\n R-square: {:.5f}\r\n Adjusted R-square: {:.5f}\r\n \"\"\".format(model['SSE'], model['RMSE'], model['rsquare'], model['adjrsquare'])\r\n self.result_textBox.setText(final_text)\r\n\r\n def whatTab(self):\r\n \"\"\"For reference\"\"\"\r\n self.currentTab = self.tabWidget.currentWidget()\r\n pass\r\n\r\n def closeTab(self, currentIndex):\r\n if self.tabWidget.count() < 2:\r\n return # Do not close the last tab\r\n currentQWidget = self.tabWidget.widget(currentIndex)\r\n currentQWidget.deleteLater()\r\n self.tabWidget.removeTab(currentIndex)\r\n\r\n def setMenuBarItems(self):\r\n # <editor-fold desc=\"File menu\">\r\n self.menuFile = QtWidgets.QMenu(self.menubar)\r\n self.menuFile.setObjectName(\"menuFile\")\r\n\r\n self.actionClear_Session = QtWidgets.QAction(self)\r\n self.actionClear_Session.setObjectName(\"actionClear_Session\")\r\n self.actionClear_Session.setText(_translate(\"MainWindow\", \"Clear Session\"))\r\n self.menuFile.addAction(self.actionClear_Session)\r\n\r\n self.actionLoad_Session = QtWidgets.QAction(self)\r\n self.actionLoad_Session.setObjectName(\"actionLoad_Session\")\r\n self.actionLoad_Session.setText(_translate(\"MainWindow\", \"Load Session...\"))\r\n self.menuFile.addAction(self.actionLoad_Session)\r\n\r\n self.actionSave_Session = QtWidgets.QAction(self)\r\n self.actionSave_Session.setObjectName(\"actionSave_Session\")\r\n self.actionSave_Session.setText(_translate(\"MainWindow\", \"Save Session\"))\r\n self.menuFile.addAction(self.actionSave_Session)\r\n\r\n self.actionSave_Session_As = QtWidgets.QAction(self)\r\n self.actionSave_Session_As.setObjectName(\"actionSave_Session_As\")\r\n self.actionSave_Session_As.setText(_translate(\"MainWindow\", \"Save Session As...\"))\r\n self.menuFile.addAction(self.actionSave_Session_As)\r\n\r\n self.actionGenerate_Code = QtWidgets.QAction(self)\r\n self.actionGenerate_Code.setObjectName(\"actionGenerate_Code\")\r\n self.actionGenerate_Code.setText(_translate(\"MainWindow\", \"Generate Code\"))\r\n self.menuFile.addAction(self.actionGenerate_Code)\r\n\r\n self.actionPrint_to_Figure = QtWidgets.QAction(self)\r\n self.actionPrint_to_Figure.setObjectName(\"actionPrint_to_Figure\")\r\n self.actionPrint_to_Figure.setText(_translate(\"MainWindow\", \"Print to Figure\"))\r\n self.menuFile.addAction(self.actionPrint_to_Figure)\r\n\r\n self.menuFile.addSeparator()\r\n self.actionClose_Curve_Fitting = QtWidgets.QAction(self)\r\n self.actionClose_Curve_Fitting.setObjectName(\"actionClose_Curve_Fitting\")\r\n self.menuFile.addAction(self.actionClose_Curve_Fitting)\r\n self.actionClose_Curve_Fitting.setText(_translate(\"MainWindow\", \"Close Curve Fitting\"))\r\n # </editor-fold>\r\n\r\n # <editor-fold desc=\"Fit menu\">\r\n self.menuFit = QtWidgets.QMenu(self.menubar)\r\n self.menuFit.setObjectName(\"menuFit\")\r\n\r\n self.actionNew_Fit = QtWidgets.QAction(self)\r\n self.actionNew_Fit.setObjectName(\"actionNew_Fit\")\r\n self.actionNew_Fit.setText(_translate(\"MainWindow\", \"New Fit\"))\r\n self.actionNew_Fit.triggered.connect(lambda: self.newFit())\r\n self.menuFit.addAction(self.actionNew_Fit)\r\n\r\n self.actionOpen_Fit = QtWidgets.QAction(self)\r\n self.actionOpen_Fit.setObjectName(\"actionOpen_Fit\")\r\n self.actionOpen_Fit.setText(_translate(\"MainWindow\", \"Open Fit\"))\r\n self.menuFit.addAction(self.actionOpen_Fit)\r\n\r\n self.actionClose = QtWidgets.QAction(self)\r\n self.actionClose.setObjectName(\"actionClose\")\r\n self.actionClose.setText(_translate(\"MainWindow\", \"Close \\\"\\\"\"))\r\n self.menuFit.addAction(self.actionClose)\r\n\r\n self.actionDelete = QtWidgets.QAction(self)\r\n self.actionDelete.setObjectName(\"actionDelete\")\r\n self.actionDelete.setText(_translate(\"MainWindow\", \"Delete \\\"\\\"\"))\r\n self.menuFit.addAction(self.actionDelete)\r\n\r\n self.actionDuplicate = QtWidgets.QAction(self)\r\n self.actionDuplicate.setObjectName(\"actionDuplicate\")\r\n self.actionDuplicate.setText(_translate(\"MainWindow\", \"Duplicate \\\"\\\"\"))\r\n self.menuFit.addAction(self.actionDuplicate)\r\n # </editor-fold>\r\n\r\n # <editor-fold desc=\"View menu\">\r\n self.menuView = QtWidgets.QMenu(self.menubar)\r\n self.menuView.setObjectName(\"menuView\")\r\n\r\n self.actionFit_Settings = QtWidgets.QAction(self)\r\n self.actionFit_Settings.setObjectName(\"actionFit_Settings\")\r\n self.actionFit_Settings.setText(_translate(\"MainWindow\", \"Fit Settings\"))\r\n self.menuView.addAction(self.actionFit_Settings)\r\n\r\n self.actionFit_Results = QtWidgets.QAction(self)\r\n self.actionFit_Results.setObjectName(\"actionFit_Results\")\r\n self.actionFit_Results.setText(_translate(\"MainWindow\", \"Fit Results\"))\r\n self.menuView.addAction(self.actionFit_Results)\r\n\r\n self.menuView.addSeparator()\r\n self.actionMain_Plot = QtWidgets.QAction(self)\r\n self.actionMain_Plot.setObjectName(\"actionMain_Plot\")\r\n self.actionMain_Plot.setText(_translate(\"MainWindow\", \"Main Plot\"))\r\n self.menuView.addAction(self.actionMain_Plot)\r\n\r\n self.actionResidual_Plot = QtWidgets.QAction(self)\r\n self.actionResidual_Plot.setObjectName(\"actionResidual_Plot\")\r\n self.actionResidual_Plot.setText(_translate(\"MainWindow\", \"Residual Plot\"))\r\n self.menuView.addAction(self.actionResidual_Plot)\r\n\r\n self.actionContour_Plot = QtWidgets.QAction(self)\r\n self.actionContour_Plot.setObjectName(\"actionContour_Plot\")\r\n self.actionContour_Plot.setText(_translate(\"MainWindow\", \"Contour Plot\"))\r\n self.menuView.addAction(self.actionContour_Plot)\r\n\r\n self.menuView.addSeparator()\r\n self.actionTable_of_Fits = QtWidgets.QAction(self)\r\n self.actionTable_of_Fits.setObjectName(\"actionTable_of_Fits\")\r\n self.actionTable_of_Fits.setText(_translate(\"MainWindow\", \"Table of Fits\"))\r\n self.menuView.addAction(self.actionTable_of_Fits)\r\n # </editor-fold>\r\n\r\n # <editor-fold desc=\"Tools menu\">\r\n self.menuTools = QtWidgets.QMenu(self.menubar)\r\n self.menuTools.setObjectName(\"menuTools\")\r\n\r\n self.actionZoom_In = QtWidgets.QAction(self)\r\n self.actionZoom_In.setObjectName(\"actionZoom_In\")\r\n self.menuTools.addAction(self.actionZoom_In)\r\n self.actionZoom_In.setText(_translate(\"MainWindow\", \"Zoom In\"))\r\n\r\n self.actionZoom_Out = QtWidgets.QAction(self)\r\n self.actionZoom_Out.setObjectName(\"actionZoom_Out\")\r\n self.actionZoom_Out.setText(_translate(\"MainWindow\", \"Zoom Out\"))\r\n self.menuTools.addAction(self.actionZoom_Out)\r\n\r\n self.actionPan = QtWidgets.QAction(self)\r\n self.actionPan.setObjectName(\"actionPan\")\r\n self.actionPan.setText(_translate(\"MainWindow\", \"Pan\"))\r\n self.menuTools.addAction(self.actionPan)\r\n\r\n self.actionData_Cursor = QtWidgets.QAction(self)\r\n self.actionData_Cursor.setObjectName(\"actionData_Cursor\")\r\n self.actionData_Cursor.setText(_translate(\"MainWindow\", \"Data Cursor\"))\r\n self.menuTools.addAction(self.actionData_Cursor)\r\n\r\n self.menuTools.addSeparator()\r\n self.actionLegend = QtWidgets.QAction(self)\r\n self.actionLegend.setObjectName(\"actionLegend\")\r\n self.actionLegend.setText(_translate(\"MainWindow\", \"Legend\"))\r\n self.menuTools.addAction(self.actionLegend)\r\n\r\n self.actionGrid = QtWidgets.QAction(self)\r\n self.actionGrid.setObjectName(\"actionGrid\")\r\n self.actionGrid.setText(_translate(\"MainWindow\", \"Grid\"))\r\n self.menuTools.addAction(self.actionGrid)\r\n\r\n self.menuTools.addSeparator()\r\n self.actionPrediction_Bounds = QtWidgets.QAction(self)\r\n self.actionPrediction_Bounds.setObjectName(\"actionPrediction_Bounds\")\r\n self.actionPrediction_Bounds.setText(_translate(\"MainWindow\", \"Prediction Bounds\"))\r\n self.menuTools.addAction(self.actionPrediction_Bounds)\r\n\r\n self.menuTools.addSeparator()\r\n self.actionAxes_Limits = QtWidgets.QAction(self)\r\n self.actionAxes_Limits.setObjectName(\"actionAxes_Limits\")\r\n self.actionAxes_Limits.setText(_translate(\"MainWindow\", \"Axes Limits\"))\r\n self.menuTools.addAction(self.actionAxes_Limits)\r\n\r\n self.actionExclude_By_Rule = QtWidgets.QAction(self)\r\n self.actionExclude_By_Rule.setObjectName(\"actionExclude_By_Rule\")\r\n self.actionExclude_By_Rule.setText(_translate(\"MainWindow\", \"Exclude By Rule\"))\r\n self.menuTools.addAction(self.actionExclude_By_Rule)\r\n # </editor-fold>\r\n\r\n # <editor-fold desc=\"Add actions\">\r\n self.menubar.addAction(self.menuFile.menuAction())\r\n self.menuFile.setTitle(_translate(\"MainWindow\", \"File\"))\r\n self.menubar.addAction(self.menuFit.menuAction())\r\n self.menuFit.setTitle(_translate(\"MainWindow\", \"Fit\"))\r\n self.menubar.addAction(self.menuView.menuAction())\r\n self.menuView.setTitle(_translate(\"MainWindow\", \"View\"))\r\n self.menubar.addAction(self.menuTools.menuAction())\r\n self.menuTools.setTitle(_translate(\"MainWindow\", \"Tools\"))\r\n # </editor-fold>\r\n\r\n def filterVars(self, vars_list):\r\n \"\"\"Determine what to data variables to load\"\"\"\r\n vars_final = {}\r\n for kk in vars_list:\r\n for key, value in kk.items():\r\n if isinstance(value, np.ndarray):\r\n vars_final[key] = value\r\n elif isinstance(value, (list, tuple)):\r\n vars_final[key] = np.array(value)\r\n return vars_final\r\n\r\n def getParams(self):\r\n for key, widget in self.methods.items():\r\n if isinstance(widget, str):\r\n self.params[key] = widget\r\n elif isinstance(widget, QtWidgets.QLineEdit):\r\n self.params[key] = str2numericHandleError(widget.text())\r\n elif isinstance(widget, QtWidgets.QComboBox):\r\n self.params[key] = str2numericHandleError(widget.currentText())\r\n elif isinstance(widget, QtWidgets.QLabel):\r\n self.params[key] = str2numericHandleError(widget.text())\r\n elif isinstance(widget, ElideQLabel):\r\n self.params[key] = str2numericHandleError(widget.text())\r\n elif isinstance(widget, QCodeEdit):\r\n self.params[key] = widget.text()\r\n elif isinstance(widget, QtWidgets.QCheckBox):\r\n self.params[key] = True if widget.checkState() > 0 else False\r\n elif isinstance(widget, QtWidgets.QSpinBox):\r\n self.params[key] = widget.value()\r\n else:\r\n raise(TypeError('Unrecognized type of item'))\r\n\r\n def curveFit(self):\r\n \"\"\"Interface the curve fitting\"\"\"\r\n self.getParams() # get parameters from the current method\r\n self.options.getParams() # get parameters if there is fit options\r\n\r\n # Preprocessing\r\n if self.centerscale:\r\n xdata0 = (self.xdata - np.mean(self.xdata)) / np.std(self.xdata)\r\n ydata0 = (self.ydata - np.mean(self.ydata)) / np.std(self.ydata)\r\n zdata0 = (self.zdata - np.mean(self.zdata)) / np.std(self.zdata)\r\n wdata0 = self.wdata\r\n else:\r\n xdata0, ydata0, zdata0, wdata0 = self.xdata, self.ydata, self.zdata, self.wdata\r\n\r\n # Initialize a dict for result outputs\r\n model = {'type': self.params['dim'] + \": \" + self.params['method']}\r\n # Fit data case by case\r\n print(self.params['dim'])\r\n if self.params['dim'] == '2D':\r\n #if ~np.isnan(wdata0):\r\n #ydata0 = ydata0 * wdata0\r\n if self.params['method'] in ['Exponential', 'Power', 'Weibull']:\r\n popt, pcov, f0, fitted_params = self.fitTerms(xdata0, ydata0, terms=self.params['terms'])\r\n self.graphFit(f0, popt) # Plot the fit\r\n model.update({'formula': self.params['terms']}) # Output fitting results to the result box\r\n elif self.params['method'] == 'Polynomial':\r\n popt, pcov = np.polyfit(xdata0, ydata0, deg=self.params['degree'],\r\n w=None if np.isnan(wdata0) else wdata0, cov=True)\r\n f0 = lambda x, *args: np.polyval(args, x)\r\n self.graphFit(f0, popt) # Plot the fit\r\n model['type'] = model['type'] + ' (deg={:d})'.format(self.params['degree'])\r\n # Make the polynomial string\r\n model['formula'] = {1: 'p0 + p1 * x', 2: 'p0 + p1 * x + p2 * x^2'}.get(self.params['degree'],\r\n 'p0 + p1 * x + ... + p{0} * x^{0}'.format(self.params['degree']))\r\n # Fitted params\r\n fitted_params = ['p{:d}'.format(deg) for deg in range(self.params['degree']+1)]\r\n elif self.params['method'] == 'Rational':\r\n f0, f0_str = self.make_rational_function(p=self.params['numdeg'], q=self.params['dendeg'])\r\n num_formula = {0: 'p0', 1: 'p1 * x + p0', 2: 'p2 * x^2 + p1 * x + p0'}.get(self.params['numdeg'],\r\n 'p{0} * x^{0} + ... + p1 * x + p0'.format(self.params['numdeg']))\r\n den_formula = {1: 'x + q0', 2: 'x^2 + q1 * x + p0', 3: 'x^3 + q2 * x^2 + q1 * x + q0'}.get(self.params['dendeg'],\r\n 'x^{0} + ... + q1 * x + q0'.format(self.params['dendeg']))\r\n\r\n model['formula'] = ' {}\\n {}\\n {}'.format(num_formula, \"-\"*(max(len(num_formula), len(den_formula))+5), den_formula)\r\n model['type'] = model['type'] + ' (p = {:d}, q = {:d})'.format(self.params['numdeg'], self.params['dendeg'])\r\n\r\n fitted_params = self.list_rational_terms(self.params['numdeg'], self.params['dendeg'])\r\n popt, pcov, = self.call_curve_fit(f0, xdata0, ydata0, fitted_params)\r\n self.graphFit(f0, popt) # Plot the fit\r\n else:\r\n return\r\n else: # 3D\r\n return\r\n\r\n # Output to results\r\n if pcov is not None:\r\n # Calculate goodness of fit\r\n gof = goodness_of_fit(xdata0, ydata0, popt, pcov, f0)\r\n model.update(gof)\r\n # Calcualte confidence interval\r\n ci_list = confidence_interval(ydata0, popt, pcov, alpha=0.05, parameter_names=fitted_params)\r\n model['ci'] = ci_list\r\n if self.params['method'] == 'Polynomial': pass\r\n\r\n self.outputResultText(model)\r\n else:\r\n self.outputResultText({'final_text': str(popt)})\r\n\r\n def fitTerms(self, x0, y0, terms='a*exp(b*x)+c'):\r\n \"\"\"Fit a function with predefined equation\"\"\"\r\n if (isnumber(x0) and np.isnan(x0)) or (isnumber(y0) and np.isnan(y0)):\r\n return Exception(\"Need at least 2 dimensional data to fit\"), None, None, None\r\n\r\n p0, bounds0 = None, None\r\n\r\n if terms == 'a*exp(b*x)':\r\n f0 = lambda x, a, b: a * np.exp(b * x)\r\n fitted_params = ['a', 'b']\r\n elif terms == 'a*exp(b*x)+c':\r\n f0 = lambda x, a, b, c: a * np.exp(b * x) + c\r\n fitted_params = ['a', 'b', 'c']\r\n # Add in help for fitting\r\n p0 = fit_exp_with_offset(x0, y0)\r\n elif terms == 'a*exp(b*x) + c*exp(d*x)':\r\n f0 = lambda x, a, b, c, d: a * np.exp(b * x) + c * np.exp(d * x)\r\n fitted_params = ['a', 'b', 'c', 'd']\r\n elif terms == 'a*b*x^(b-1)*exp(-a*x^b)': # Weibull\r\n f0 = lambda x, a, b: a * b * x ** (b - 1) * np.exp(-a * x ** b)\r\n fitted_params = ['a', 'b']\r\n elif terms == 'a*x^b':\r\n f0 = lambda x, a, b: a * x ** b\r\n fitted_params = ['a', 'b']\r\n elif terms == 'a*x^b+c':\r\n f0 = lambda x, a, b, c: a * x ** b + c\r\n fitted_params = ['a', 'b', 'c']\r\n\r\n popt, pcov = self.call_curve_fit(f0, x0, y0, fitted_params, p0=p0, bounds0=bounds0)\r\n\r\n return popt, pcov, f0, fitted_params\r\n\r\n def call_curve_fit(self, f0, x0, y0, fitted_params, p0=None, bounds0=None):\r\n params = self.options.params\r\n if p0 is None:\r\n p0 = [params['coefficients'][l][0] for l in fitted_params]\r\n if bounds0 is None:\r\n bounds0 = (\r\n [params['coefficients'][l][1] for l in fitted_params], [params['coefficients'][l][2] for l in fitted_params])\r\n method = {'Trust-Region Reflective': 'trf', 'Levenberg-Marquardt': 'lm', 'Dog-Box': 'dogbox'}.get(params['algorithm'])\r\n\r\n try:\r\n if method == 'lm':\r\n popt, pcov = curve_fit(f0, x0, y0,\r\n p0=p0, bounds=bounds0, method=method,\r\n maxfev=0 if params['maxfev'] is None else params['maxfev'],\r\n ftol=params['ftol'],\r\n xtol=params['xtol'], gtol=params['gtol'])\r\n else:\r\n popt, pcov = curve_fit(f0, x0, y0,\r\n p0=p0, bounds=bounds0, method=method,\r\n max_nfev=params['maxfev'],\r\n ftol=params['ftol'], loss=params['loss'],\r\n xtol=params['xtol'], gtol=params['gtol'])\r\n return popt, pcov\r\n except Exception as err:\r\n print(err)\r\n # set_trace()\r\n return err, None\r\n\r\n def make_rational_function(self, p, q):\r\n p_list = \", \".join([\"p{:d}\".format(n) for n in range(p+1)])\r\n q_list = \", \".join([\"q{:d}\".format(n) for n in range(q)])\r\n p_reverse_list = \", \".join([\"p{:d}\".format(n) for n in reversed(range(p+1))])\r\n q_reverse_list = \", \".join([\"q{:d}\".format(n) for n in reversed(range(q))])\r\n q_reverse_list = \"1.0, \" + q_reverse_list\r\n f0_str = \"lambda x, {0}, {1}: np.polyval([{2}], x) / np.polyval([{3}], x)\".format(\r\n p_list, q_list, p_reverse_list, q_reverse_list)\r\n return eval(f0_str), f0_str # necessary evil\r\n\r\n def list_rational_terms(self, p, q):\r\n p_terms = [\"p{:d}\".format(n) for n in range(p+1)]\r\n q_terms = [\"q{:d}\".format(n) for n in range(q)]\r\n return p_terms + q_terms\r\n\r\n def retranslateUi(self, MainWindow):\r\n MainWindow.setWindowTitle(_translate(\"MainWindow\", \"MainWindow\"))\r\n\r\n\r\ndef cftool(*args, **kwargs):\r\n \"\"\"\r\n Wrapper function to call cftool GUI\r\n :param xdata: x data\r\n :param ydata: y data\r\n :param zdata: z data (if 3D)\r\n :param wdata: weights\r\n :return:\r\n \"\"\"\r\n pass\r\n\r\nif __name__ == \"__main__\":\r\n np.random.seed(42)\r\n f0 = lambda x, p0, p1, p2, q0, q1, q2: np.polyval([p2, p1, p0], x) / np.polyval([1, q2, q1, q0], x)\r\n Xdata = np.random.randn(1000)/100 + np.arange(0, 10, 0.01)+1\r\n Ydata = np.random.randn(1000)/100 + f0(Xdata, 1, 5, 3, 2, 4, 3) #5 * np.exp(-0.2*Xdata)+1\r\n Zdata = np.random.randn(1000)\r\n Wdata = np.random.randn(1000)*5\r\n X_small = np.array([1, 2, 3, 4])\r\n sys.excepthook = my_excepthook # helps prevent uncaught exception crashing the GUI\r\n app = QtWidgets.QApplication(sys.argv)\r\n w = cftool_MainWindow()\r\n w.show()\r\n\r\n sys.exit(app.exec_())"
},
{
"alpha_fraction": 0.48502883315086365,
"alphanum_fraction": 0.507904052734375,
"avg_line_length": 44.232757568359375,
"blob_id": "474ab28a58321a9b81cb15903837373491deb156",
"content_id": "b36c399358760d02b077b42019f6f1af291e45e4",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 5377,
"license_type": "no_license",
"max_line_length": 138,
"num_lines": 116,
"path": "/generic/Arima.py",
"repo_name": "sapphire008/Python",
"src_encoding": "UTF-8",
"text": "\r\n\r\nimport itertools\r\nimport warnings\r\nimport matplotlib.pyplot as plt\r\nimport numpy as np\r\nimport pandas as pd\r\nimport statsmodels.api as sm\r\n\r\n\r\nclass ArimaCustom():\r\n def __init__(self, arima_para, seasonal_para):\r\n # Define the p, d and q parameters in Arima(p,d,q)(P,D,Q) models\r\n p = arima_para['p']\r\n d = arima_para['d']\r\n q = arima_para['q']\r\n # Generate all different combinations of p, q and q triplets\r\n self.pdq = list(itertools.product(p, d, q))\r\n # Generate all different combinations of seasonal p, q and q triplets\r\n self.seasonal_pdq = [(x[0], x[1], x[2], seasonal_para)\r\n for x in list(itertools.product(p, d, q))]\r\n\r\n def fit(self, ts):\r\n warnings.filterwarnings(\"ignore\")\r\n results_list = []\r\n for param in self.pdq:\r\n for param_seasonal in self.seasonal_pdq:\r\n try:\r\n mod = sm.tsa.statespace.SARIMAX(ts,\r\n order=param,\r\n seasonal_order=param_seasonal,\r\n enforce_stationarity=False,\r\n enforce_invertibility=False)\r\n results = mod.fit()\r\n\r\n print('ARIMA{}x{}seasonal - AIC:{}'.format(param,\r\n param_seasonal, results.aic))\r\n results_list.append([param, param_seasonal, results.aic])\r\n except:\r\n continue\r\n results_list = np.array(results_list)\r\n lowest_AIC = np.argmin(results_list[:, 2])\r\n print('+++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++')\r\n print('ARIMA{}x{}seasonal with lowest_AIC:{}'.format(\r\n results_list[lowest_AIC, 0], results_list[lowest_AIC, 1], results_list[lowest_AIC, 2]))\r\n print('+++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++')\r\n\r\n mod = sm.tsa.statespace.SARIMAX(ts,\r\n order=results_list[lowest_AIC, 0],\r\n seasonal_order=results_list[lowest_AIC, 1],\r\n enforce_stationarity=False,\r\n enforce_invertibility=False)\r\n self.final_result = mod.fit()\r\n print('Final model summary:')\r\n print(self.final_result.summary().tables[1])\r\n print('Final model diagnostics:')\r\n self.final_result.plot_diagnostics(figsize=(15, 12))\r\n plt.tight_layout()\r\n plt.savefig('model_diagnostics.png', dpi=300)\r\n plt.show()\r\n\r\n def pred(self, ts, plot_start, pred_start, dynamic, ts_label):\r\n\r\n pred_dynamic = self.final_result.get_prediction(\r\n start=pd.to_datetime(pred_start), dynamic=dynamic, full_results=True)\r\n pred_dynamic_ci = pred_dynamic.conf_int()\r\n ax = ts[plot_start:].plot(label='observed', figsize=(15, 10))\r\n\r\n if dynamic == False:\r\n pred_dynamic.predicted_mean.plot(\r\n label='One-step ahead Forecast', ax=ax)\r\n else:\r\n pred_dynamic.predicted_mean.plot(label='Dynamic Forecast', ax=ax)\r\n\r\n ax.fill_between(pred_dynamic_ci.index,\r\n pred_dynamic_ci.iloc[:, 0],\r\n pred_dynamic_ci.iloc[:, 1], color='k', alpha=.25)\r\n ax.fill_betweenx(ax.get_ylim(), pd.to_datetime(plot_start), ts.index[-1],\r\n alpha=.1, zorder=-1)\r\n ax.set_xlabel('Time')\r\n ax.set_ylabel(ts_label)\r\n plt.legend()\r\n plt.tight_layout()\r\n # if dynamic == False:\r\n # #plt.savefig(ts_label + '_one_step_pred.png', dpi=300)\r\n # else:\r\n #plt.savefig(ts_label + '_dynamic_pred.png', dpi=300)\r\n plt.show()\r\n\r\n def forecast(self, ts, n_steps, ts_label):\r\n # Get forecast n_steps ahead in future\r\n pred_uc = self.final_result.get_forecast(steps=n_steps)\r\n\r\n # Get confidence intervals of forecasts\r\n pred_ci = pred_uc.conf_int()\r\n ax = ts.plot(label='observed', figsize=(15, 10))\r\n pred_uc.predicted_mean.plot(ax=ax, label='Forecast in Future')\r\n ax.fill_between(pred_ci.index,\r\n pred_ci.iloc[:, 0],\r\n pred_ci.iloc[:, 1], color='k', alpha=.25)\r\n ax.set_xlabel('Time')\r\n ax.set_ylabel(ts_label)\r\n plt.tight_layout()\r\n # plt.savefig(ts_label + '_forcast.png', dpi=300)\r\n plt.legend()\r\n plt.show()\r\n \r\nif __name__ == \"__main__\":\r\n arima_para = {\"p\": range(2), \"d\": range(2), \"q\": range(2)}\r\n # the seasonal periodicy 24 hours, i.e. S=24*60/30 = 48 samples\r\n seasonal_para = round(24. * 60. / 30.)\r\n arima_model = Arima(arima_para, seasonal_para)\r\n # Fitting\r\n arima_model.fit(time_series_train, plot_start='2010-11-24 00:00:00', pred_start='2010-11-25 14:00:00', dynamic=True, ts_label=\"Train\")\r\n # Prediction\r\n arima_model.pred(time_series_test, plot_start='2010-11-24 00:00:00', pred_start='2010-11-25 14:00:00', dynamic=True, ts_label=\"Test\")\r\n # Forecasting\r\n arima_model.forecast(time_series_train, n_steps=100, ts_label=\"Forecast\")\r\n \r\n "
},
{
"alpha_fraction": 0.5029744505882263,
"alphanum_fraction": 0.5368015766143799,
"avg_line_length": 29.727598190307617,
"blob_id": "d5efadc553793927af3a74b7d523f5eb253dc130",
"content_id": "7f3809ca1750f40017b7f56b01714bd539713079",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 8573,
"license_type": "no_license",
"max_line_length": 110,
"num_lines": 279,
"path": "/generic/machinelearning.py",
"repo_name": "sapphire008/Python",
"src_encoding": "UTF-8",
"text": "import numpy as np\nimport matplotlib.pyplot as plt\nfrom sklearn.cluster import KMeans\nfrom sklearn.metrics import silhouette_score\n\nfrom MATLAB import *\n\n# %% ----- Curve fitting --------\ndef fit_double_exp(x, y, sort=False):\n \"\"\"\n Fitting y = b * exp(p * x) + c * exp(q * x)\n Implemented based on:\n Regressions et Equations Integrales by Jean Jacquelin\n \"\"\"\n if sort:\n # Sorting (x, y) such that x is increasing\n X, _ = sortrows(np.c_[x, y], col=0)\n x, y = X[:, 0], X[:, 1]\n # Start algorithm\n n = len(x)\n S = np.zeros_like(x)\n S[1:] = 0.5 * (y[:-1] + y[1:]) * np.diff(x)\n S = np.cumsum(S)\n SS = np.zeros_like(x)\n SS[1:] = 0.5 * (S[:-1] + S[1:]) * np.diff(x)\n SS = np.cumsum(SS)\n\n # Getting the parameters\n M = np.empty((4, 4))\n N = np.empty((4, 1))\n\n M[:, 0] = np.array([np.sum(SS**2), np.sum(SS * S), np.sum(SS * x), np.sum(SS)])\n\n M[0, 1] = M[1, 0]\n M[1:,1] = np.array([np.sum(S**2), np.sum(S * x), np.sum(S)])\n\n M[:2,2] = M[2, :2]\n M[2, 2] = np.sum(x**2)\n\n M[:3,3] = M[3,:3]\n M[3, 3] = n\n\n N[:, 0] = np.array([np.sum(SS * y), np.sum(S * y), np.sum(x * y), np.sum(y)])\n\n # Regression for p and q\n ABCD = np.matmul(np.linalg.inv(M), N)\n #set_trace()\n A, B, C, D = ABCD.flatten()\n p = 0.5 * (B + np.sqrt(B**2 + 4 * A))\n q = 0.5 * (B - np.sqrt(B**2 + 4 * A))\n\n # Regression for b, c\n I = np.empty((2, 2))\n J = np.empty((2, 1))\n\n beta = np.exp(p * x)\n eta = np.exp(q * x)\n I[0, 0] = np.sum(beta**2)\n I[1, 0] = np.sum(beta * eta)\n I[0, 1] = I[1, 0]\n I[1, 1] = np.sum(eta**2)\n\n\n J[:, 0] = [np.sum(y * beta), np.sum(y * eta)]\n\n bc = np.matmul(np.linalg.inv(I), J)\n b, c = bc.flatten()\n\n return b, c, p, q\n\ndef fit_double_exp_with_offset(x, y, sort=False):\n \"\"\"\n Fitting y = a + b * exp(p * x) + c * exp(q * x)\n Implemented based on:\n https://math.stackexchange.com/questions/2249200/exponential-regression-with-two-terms-and-constraints\n \"\"\"\n if sort:\n # Sorting (x, y) such that x is increasing\n X, _ = sortrows(np.c_[x, y], col=0)\n x, y = X[:, 0], X[:, 1]\n # Start algorithm\n n = len(x)\n S = np.zeros_like(x)\n S[1:] = 0.5 * (y[:-1] + y[1:]) * np.diff(x)\n S = np.cumsum(S)\n SS = np.zeros_like(x)\n SS[1:] = 0.5 * (S[:-1] + S[1:]) * np.diff(x)\n SS = np.cumsum(SS)\n\n # Getting the parameters\n M = np.empty((5, 5))\n N = np.empty((5, 1))\n\n M[:, 0] = np.array([np.sum(SS**2), np.sum(SS * S), np.sum(SS * x**2), np.sum(SS * x), np.sum(SS)])\n\n M[0, 1] = M[1, 0]\n M[1:,1] = np.array([np.sum(S**2), np.sum(S * x**2), np.sum(S * x), np.sum(S)])\n\n M[0, 2] = M[2, 0]\n M[1, 2] = M[2, 1]\n M[2:,2] = np.array([np.sum(x**4), np.sum(x**3), np.sum(x**2)])\n\n M[:3,3] = M[3,:3]\n M[3, 3] = M[4, 2]\n M[4, 3] = np.sum(x)\n\n M[:4, 4] = M[4, :4]\n M[4, 4] = n\n\n N[:, 0] = np.array([np.sum(SS * y), np.sum(S * y), np.sum(x**2 * y), np.sum(x * y), np.sum(y)])\n\n # Regression for p and q\n ABCDE = np.matmul(np.linalg.inv(M), N)\n A, B, C, D, E = ABCDE.flatten()\n p = 0.5 * (B + np.sqrt(B**2 + 4 * A))\n q = 0.5 * (B - np.sqrt(B**2 + 4 * A))\n\n # Regression for a, b, c\n I = np.empty((3, 3))\n J = np.empty((3, 1))\n\n I[0, 0] = n\n I[1, 0] = np.sum(np.exp(p * x))\n I[2, 0] = np.sum(np.exp(q * x))\n I[0, 1] = I[1, 0]\n I[1, 1] = np.sum(I[1, 0]**2)\n I[2, 1] = np.sum(I[1, 0] * I[2, 0])\n I[0, 2] = I[2, 0]\n I[1, 2] = I[2, 1]\n I[2, 2] = np.sum(I[2, 0]**2)\n\n J[:, 0] = [np.sum(y), np.sum(y * I[1, 0]), np.sum(y * I[2, 0])]\n\n abc = np.matmul(np.linalg.inv(I), J)\n a, b, c = abc.flatten()\n\n return a, b, c, p, q\n\ndef fit_gaussian_non_iter(x, y, sort=False):\n \"\"\"\n Fitting Gaussian y = 1 / (sigma * sqrt(2 * pi)) * exp( -1/2 * ( (x - mu) / sigma )^2 )\n using non-iterative method based on\n Regressions et Equations Integrales by Jean Jacquelin\n \"\"\"\n if sort:\n # Sorting (x, y) such that x is increasing\n X, _ = sortrows(np.c_[x, y], col=0)\n x, y = X[:, 0], X[:, 1]\n # Start algorithm\n S = np.zeros_like(x)\n S[1:] = 0.5 * (y[:-1] + y[1:]) * np.diff(x)\n S = np.cumsum(S)\n T = np.zeros_like(x)\n x_y = x * y\n T[1:] = 0.5 * ( x_y[:-1] + x_y[1:] ) * np.diff(x)\n T = np.cumsum(T)\n\n # S1 = np.zeros_like(x)\n # T1 = np.zeros_like(x)\n # for k in range(1, len(S1)):\n # S1[k] = S1[k-1] + 1/2 * (y[k] + y[k-1]) * (x[k] - x[k-1])\n # T1[k] = T1[k-1] + 1/2 * (y[k]*x[k] + y[k-1]*x[k-1]) * (x[k] - x[k-1])\n\n M = np.empty((2, 2))\n N = np.empty((2, 1))\n\n # Getting the parameters\n M[0, 0] = np.sum(S**2)\n M[0, 1] = np.sum(S * T)\n M[1, 0] = M[0, 1]\n M[1, 1] = np.sum(T**2)\n\n N[0, 0] = np.sum((y - y[0]) * S)\n N[1, 0] = np.sum((y - y[0]) * T)\n AB = np.matmul(np.linalg.inv(M), N)\n A = AB[0, 0]\n B = AB[1, 0]\n\n mu = - A / B\n\n sigma = np.sqrt(-1 / B)\n\n return mu, sigma\n\n# %% ----- K-means -------------\ndef elbow_curve(X, max_clusters=15, plot=False, *args, **kwargs):\n \"\"\"\n Return the elbow curve for K-means clustering\n\n Example:\n\n from sklearn.datasets import make_blobs\n X, y_varied = make_blobs(n_samples=100,\n cluster_std=[1.0, 2.5, 0.5],\n random_state=42)\n distortions, best_K = elbow_curve(X, max_clusters=10, plot=True)\n\n Inputs:\n * X: [n_samples, n_features]\n * max_clusters: max number of clusters to calculate. Default 15.\n * distance: distance metric used for clustering. Default 'euclidean'\n * *args, **kwargs: additional arguments for kmeans function\n\n Return:\n * distortions: arary of within-group sum of squares\n * best_k: best K value\n \"\"\"\n # Calculate the elbow curve\n distortions = np.zeros(max_clusters)\n for k in range(0, max_clusters):\n D = KMeans(n_clusters=k+1).fit(X)\n for s in range(k+1):\n distortions[k] = distortions[k] + \\\n np.sum((X[D.labels_==s, :] - D.cluster_centers_[s, :])**2)\n\n best_idx = find_point_closest(distortions)\n\n if plot:\n plt.plot(np.arange(1, max_clusters+1), distortions, '-o')\n plt.plot(best_idx+1, distortions[best_idx], 'ro')\n plt.xticks(np.arange(1, max_clusters+1))\n plt.xlabel('K')\n plt.ylabel('Distortions')\n\n return distortions, best_idx + 1 # return K\n\n\ndef find_point_closest(curve, plot=False):\n \"\"\"\n Given an elbow curve, find the index of best K\n (then best K would be this index + 1)\n Can also be applied to ROC curve\n https://stackoverflow.com/questions/2018178/finding-the-best-trade-off-point-on-a-curve\n \"\"\"\n # get coordinates of all the points\n nPoints = len(curve)\n allCoord = np.c_[np.arange(0, nPoints)+1, curve] # SO formatting\n\n # pull out first point\n firstPoint = allCoord[0,:]\n\n # get vector between first and last point - this is the line\n lineVec = allCoord[-1,:] - firstPoint\n\n # normalize the line vector\n lineVecN = lineVec / np.sqrt(np.sum(lineVec**2))\n\n # find the distance from each point to the line:\n # vector between all points and first point\n vecFromFirst = allCoord - firstPoint\n\n # To calculate the distance to the line, we split vecFromFirst into two\n # components, one that is parallel to the line and one that is perpendicular\n # Then, we take the norm of the part that is perpendicular to the line and\n # get the distance.\n # We find the vector parallel to the line by projecting vecFromFirst onto\n # the line. The perpendicular vector is vecFromFirst - vecFromFirstParallel\n # We project vecFromFirst by taking the scalar product of the vector with\n # the unit vector that points in the direction of the line (this gives us\n # the length of the projection of vecFromFirst onto the line). If we\n # multiply the scalar product by the unit vector, we have vecFromFirstParallel\n scalarProduct = vecFromFirst.dot(lineVecN)\n vecFromFirstParallel = np.matmul(scalarProduct[:, np.newaxis], lineVecN[np.newaxis, :])\n vecToLine = vecFromFirst - vecFromFirstParallel\n\n # distance to line is the norm of vecToLine\n distToLine = np.sqrt(np.sum(vecToLine**2,1));\n\n # plot the distance to the line\n # plt.plot(distToLine)\n\n # now all you need is to find the maximum\n idxOfBestPoint = np.argmax(distToLine)\n if plot:\n # plot\n plt.plot(curve)\n plt.plot(allCoord[idxOfBestPoint,0], allCoord[idxOfBestPoint,1], 'or')\n\n return idxOfBestPoint\n"
},
{
"alpha_fraction": 0.5647761225700378,
"alphanum_fraction": 0.5820895433425903,
"avg_line_length": 32.89583206176758,
"blob_id": "4b69c9b0f31117c989184cf88f872098f479da41",
"content_id": "da737f7688f5e1743bf09110def3597ffcf5f61b",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 1675,
"license_type": "no_license",
"max_line_length": 79,
"num_lines": 48,
"path": "/python_tutorials/Violent Python/Web_Recon.py",
"repo_name": "sapphire008/Python",
"src_encoding": "UTF-8",
"text": "# -*- coding: utf-8 -*-\r\n\"\"\"\r\nCreated on Sun Nov 23 21:25:32 2014\r\n\r\n@author: Edward\r\n\"\"\"\r\n\r\nimport mechanize, urllib2, cookielib, random, time\r\n\r\nclass anonBrowser(mechanize.Browser):\r\n def __init__(self, proxies = [], user_agents = []):\r\n mechanize.Browser.__init__(self)\r\n self.set_handle_robots(False)\r\n self.proxies = proxies\r\n self.user_agents = user_agents + ['Mozilla/12.0', 'FireFox/24.0',\r\n 'ExactSearch','iOS8.1']\r\n self.cookie_jar = cookielib.LWPCookieJar()\r\n self.set_cookiejar(self.cookie_jar)\r\n self.anonymize()\r\n def clear_cookies(self):\r\n self.cookie_jar = cookielib.LWPCookieJar()\r\n self.set_cookiejar(self.cookie_jar)\r\n def change_user_agent(self):\r\n index = random.randrange(0, len(self.user_agents))\r\n self.addheaders = [('User-agent', (self.user_agents[index]))]\r\n def change_proxy(self):\r\n if self.proxies:\r\n index = random.randrange(0, len(self.prproxies))\r\n self.set_proxies({'http': self.proxies[index]})\r\n def anonymize(self, sleep=False):\r\n self.clear_cookies()\r\n self.change_user_agent()\r\n self.change_proxy()\r\n if sleep:\r\n time.sleep(60)\r\n \r\ndef viewOnlineText(text_url):\r\n for line in urllib2.urlopen(text_url):\r\n return line.strip('\\n\\t .,;')\r\n break\r\n\r\nab = anonBrowser(proxies=[], user_agents=[('User-agent','superSecretBrowser')])\r\nfor attempt in range(1,5):\r\n ab.anonymize()\r\n print '[*] Fetching page ...'\r\n response = ab.open('http://kittenwar.com')\r\n for cookie in ab.cookie_jar:\r\n print cookie\r\n"
},
{
"alpha_fraction": 0.5298022627830505,
"alphanum_fraction": 0.5432072877883911,
"avg_line_length": 45.47196960449219,
"blob_id": "b66325cbc461d9785e98a1bc279505275f701463",
"content_id": "35331a79deafd70c84c3e7a62b0d25c605ae148d",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 62663,
"license_type": "no_license",
"max_line_length": 216,
"num_lines": 1320,
"path": "/Plots/PublicationFigures.py",
"repo_name": "sapphire008/Python",
"src_encoding": "UTF-8",
"text": "# -*- coding: utf-8 -*-\r\n\"\"\"\r\nCreated on Sat Jun 06 13:35:08 2015\r\n\r\n@author: Edward\r\n\"\"\"\r\nDEBUG = True\r\n\r\nimport os\r\nimport numpy as np\r\nfrom ImportData import FigureData\r\n#import matplotlib\r\n#matplotlib.use('Agg') # use 'Agg' backend\r\nimport matplotlib.pyplot as plt\r\nimport matplotlib.font_manager as fm\r\nfrom matplotlib.offsetbox import AnchoredOffsetbox, TextArea, HPacker, VPacker, AuxTransformBox\r\n# import matplotlib.ticker as tic\r\n\r\nplotType = 'neuro'\r\nstyle = 'concatenated'\r\nexampleFolder = 'C:/Users/Edward/Documents/Assignments/Scripts/Python/Plots/example/'\r\n\r\n# global variables\r\n# fontname = os.path.abspath(os.path.join(os.path.dirname(__file__), 'resource/Helvetica.ttf')) # font .ttf file path\r\nfontname = 'Arial'\r\nfontsize = {'title':16, 'xlab':12, 'ylab':12, 'xtick':10,'ytick':10, 'texts':8,\r\n 'legend': 12, 'legendtitle':12} # font size\r\ncolor = ['#1f77b4','#ff7f0e', '#2ca02c','#d62728','#9467bd','#8c564b','#e377c2','#7f7f7f','#bcbd154','#17becf'] # tableau10, or odd of tableau20\r\nmarker = ['o', 's', 'd', '^', '*', 'p']# scatter plot line marker cycle\r\nhatch = ['/','\\\\','-', '+', 'x', 'o', 'O', '.', '*'] # fill patterns potentially used for filled objects such as bars\r\nyunit_dict = {'Volt':'mV','Cur':'pA','Stim':'pA'} # trace yunit dictionary\r\ncanvas_size = (6,5) # for a single plot\r\n\r\nclass PublicationFigures(object):\r\n \"\"\"Generate publicatino quantlity figures\r\n Data: FigureData, or data file path\r\n PlotType: currently supported plot types include:\r\n ~ LinePlot: for categorical data, with error bar\r\n style:\r\n 'Twin' -- Same plot, 2 y-axis (left and right of plot)\r\n 'Vstacked' (default) -- vertically stacked subplots\r\n ~ Beeswarm: beeswarm plot; boxplot with scatter points\r\n style: 'hex','swarm' (default),'center','square'\r\n \"\"\"\r\n def __init__(self, dataFile=None, savePath=None, *args, **kwargs):\r\n \"\"\"Initialize class\r\n \"\"\"\r\n if isinstance(dataFile, (str,list,tuple,np.ndarray)):\r\n self.LoadData(dataFile, *args, **kwargs) # load data\r\n elif isinstance(dataFile, FigureData):\r\n self.data = dataFile\r\n self.savePath = savePath\r\n self.cache=0 # for progressive draw of objects\r\n\r\n def LoadData(self, dataFile, *args, **kwargs):\r\n \"\"\"To be called after object creation\"\"\"\r\n self.data = FigureData(dataFile, *args, **kwargs)\r\n # Set some variables to help with indexing\r\n g = globals()\r\n for item in ['x','y','z','by']:\r\n g['_'+item] = self.data.meta[item] \\\r\n if item in self.data.meta else None\r\n\r\n def AdjustFigure(canvas_size=canvas_size, tight_layout=True):\r\n \"\"\"Used as a decotrator to set the figure properties\"\"\"\r\n def wrap(func):\r\n def wrapper(self, *args, **kwargs):\r\n res = func(self, *args, **kwargs)#execute the function as usual\r\n self.SetFont() # adjust font\r\n if canvas_size is not None and \\\r\n str(canvas_size).lower()!='none':\r\n self.fig.set_size_inches(canvas_size) # set figure size\r\n if tight_layout:\r\n self.fig.tight_layout() # tight layout\r\n return(res)\r\n return(wrapper)\r\n return(wrap)\r\n\r\n def AdjustAxs(otypes=[np.ndarray], excluded=None):\r\n \"\"\"Used as a decorator to set the axis properties\"\"\"\r\n def wrap(func):\r\n # vectorize the func so that it can be applied to single axis or\r\n # multiple axes\r\n func_vec = np.vectorize(func, otypes=otypes, excluded=excluded)\r\n def wrapper(self, ax=None, *args, **kwargs):\r\n if ax is None: # if not specified, use default axis\r\n res = func_vec(self.axs, *args, **kwargs)\r\n else:\r\n res = func_vec(ax, *args, **kwargs)\r\n return(res)\r\n return(wrapper)\r\n return(wrap)\r\n\r\n def Save(self,savePath=None, dpi=300):\r\n \"\"\"\r\n savePath: full path to save the image. Image type determined by file\r\n extention\r\n dpi: DPI of the saved image. Default 300.\r\n \"\"\"\r\n if savePath is not None: # overwrite with new savePath\r\n self.savePath = savePath\r\n if self.savePath is None: # save to current working directory\r\n self.savePath = os.path.join(os.getcwd(),'Figure.eps')\r\n self.fig.savefig(self.savePath, bbox_inches='tight', dpi=dpi)\r\n\r\n \"\"\" ####################### Plot utilities ####################### \"\"\"\r\n @AdjustFigure(canvas_size=(50,5), tight_layout=False)\r\n def Traces(self, outline='vertical', scaleref='scalebar', scalepos='last',\r\n annotation=None, annstyle='last', color=['#000000', '#ff0000',\r\n '#0000ff','#ffa500', '#007f00','#00bfbf', '#bf00bf']):\r\n \"\"\"Plot time series / voltage and current traces\r\n outline: connfigurations of the plot. Default 'vertical'\r\n - 'vertical': arrange the traces vertically, each as a subplot.\r\n - 'horizontal': arrange the traces horizontally, concatenating the\r\n traces along time\r\n - 'overlap': Plot all the traces in the same axis, cycling through\r\n the color list.\r\n - np.ndarray specify the position of the index spanning a grid\r\n e.g. [[0,1],[2,3],[4,5]] would specify data[0] at first row,\r\n first column; data[1] at first row, second column; data[2] at\r\n second row, first column, ... and so on\r\n scaleref: style of scale reference\r\n - 'scalebar': use scale bar (Default)\r\n - 'axis': use axis\r\n scalepos: where to set scale reference.\r\n - 'last': (Default) set scale reference only in the last row and\r\n last column of the subplots. Note that if scaleref is 'axis',\r\n vertical scales will be shown in every subplot, while\r\n horitontal scale will not be set until the last subplots\r\n - 'each': set a scale reference at each subplot. Note that if\r\n scaleref is 'axis', both vertical and horizontal scales will\r\n be shown in every subplots.\r\n horitontal scale will not be set until the last subplots\r\n annotation: a string or list of strings to put into the annotation.\r\n The annotation text are also parsed from data.meta['annotation'].\r\n annstyle: style of annotation.\r\n - 'last': (Default) annotation all together after plotting the\r\n traces. Print each item in the list of annotations as a line\r\n - 'each': each subplot of traces gets an annotation next to it\r\n color: default MATLAB's color scheme\r\n \"\"\"\r\n import matplotlib.gridspec as gridspec\r\n # Define the layout of a single canvas\r\n gs = gridspec.GridSpec(2,3, width_ratios = [1, 50, 3], height_ratios=[7,1])\r\n # Set up all the axes\r\n ax = dict()\r\n fig = plt.figure()\r\n ax['trace'] = fig.add_subplot(gs[1])\r\n ax['initial'] = fig.add_subplot(gs[0], sharex=ax['trace'])\r\n ax['scalebar'] = fig.add_subplot(gs[2], sharex=ax['trace'])\r\n ax['annotation'] = fig.add_subplot(gs[3:], sharex=ax['trace'])\r\n #ax = list(ax.values())\r\n\r\n # text annotation\r\n ax['annotation'].text(0., 0.5, self.data.meta['notes'][0])\r\n # scalebar\r\n self.AddTraceScaleBar(xunit='ms', yunit='mV', color='k', ax=ax['scalebar'])\r\n # initial value\r\n ax['initial'].text(0., 0.5, '%0.2fmV'%(self.data.table[0]['VoltA'][0]))\r\n\r\n plt.gcf().set_size_inches(6,3)\r\n # parse subplot configuration\r\n if outline is None:\r\n tmp = np.array(_y)\r\n if tmp.ndim<2: tmp = tmp[np.newaxis,:]\r\n outline =np.arange(0,np.array(_y).size).reshape(tmp.shape)\r\n elif outline == 'vertical':\r\n outline = np.arange(0, np.array(_y).size)[:,np.newaxis]\r\n elif outline == 'horizontal':\r\n outline = np.arange(0,np.array(_y).size)[np.newaxis,:]\r\n elif outline == 'overlap':\r\n outline = np.array([[0]])\r\n elif isinstance(outline, (np.ndarray,list,tuple)):\r\n outline = np.asarray(outline)\r\n if outline.ndim<2: outline = outline[np.newaxis,:] # horizontal\r\n else:\r\n raise(TypeError('Unrecognized plot configuration'))\r\n\r\n nrows, ncols = outline.shape\r\n # Create figure and axis\r\n # use gridspec instead, then use additional grid to add scalebar\r\n self.fig, self.axs = plt.subplots(nrows=nrows,ncols=ncols,sharex=False, sharey=False)\r\n\r\n for n in np.nditer(outline):\r\n if n is None: continue # if no data, continue to next loop\r\n n = int(n)\r\n r,c = np.unravel_index(n, _y.shape, order='C')\r\n ax = self.axs[n] if self.axs.ndim<2 else self.axs[r, c]\r\n x, y = _x[r], _y[r,c]\r\n # for lazy error handling\r\n xlabel=self.get_field(self.data.meta,'xlabel',r)\r\n ylabel=self.get_field(self.data.meta,'ylabel',r,c)\r\n xunit=self.get_field(self.data.meta,'xunit',r)\r\n yunit=self.get_field(self.data.meta,'yunit',r,c)\r\n annotation=self.get_field(self.data.meta,'annotation',r)\r\n # parse scale reference\r\n if scalepos == 'each':\r\n ref = scaleref\r\n elif scalepos == 'last':\r\n if (ncols==1 and ax.is_last_row()) or \\\r\n (nrows==1 and ax.is_last_col()) or \\\r\n (ncols>1 and nrows>1 and \\\r\n (ax.is_last_row() or ax.is_last_col())):\r\n ref = scaleref\r\n else: # not the last line yet\r\n ref = 'yaxis' if scaleref == 'axis' else None\r\n\r\n # do the plot\r\n self.TimeSeries(self.data.table[r][x], self.data.table[r][y],\r\n ax=ax,xunit=xunit,yunit=yunit,\r\n xlabel=xlabel,ylabel=ylabel,\r\n scaleref=ref, annotation=annotation \\\r\n if annstyle=='each' else None)\r\n # set aspect ratio\r\n #self.SetAspectRatio(r=2, adjustable='box-forced')\r\n plt.subplots_adjust(wspace=0.01)\r\n self.TurnOffAxis()\r\n\r\n if annstyle == 'last': # ['last','each']\r\n self.TextAnnotation(text=annotation) # description of the trace\r\n\r\n def TimeSeries(self, X, Y, ax=None, xunit='ms', yunit='mV',\r\n xlabel=None, ylabel=None, scaleref='scalebar',\r\n annotation=None, color='k'):\r\n \"\"\"Make a single time series plot\"\"\"\r\n if ax is None: ax = self.axs\r\n hline = ax.plot(X, Y, color=color)[0]\r\n if scaleref == 'scalebar': # Use scale bar instead of axis\r\n self.AddTraceScaleBar(xunit=xunit, yunit=yunit, color=hline, ax=ax)\r\n elif scaleref == 'yaxis': # use only y axis\r\n self.ShowOnlyYAxis(ax)\r\n ax.set_ylabel(ylabel)\r\n elif scaleref == 'axis': # Use axis\r\n self.SetDefaultAxis(ax) # use default axis\r\n ax.set_xlabel(xlabel)\r\n ax.set_ylabel(ylabel)\r\n else: # do not draw any reference on this axis\r\n self.TurnOffAxis(ax)\r\n\r\n @AdjustFigure(tight_layout=False)\r\n def SingleEpisodeTraces(self, color='k',channels=['A'],\r\n streams=['Volt','Cur']):\r\n \"\"\"Helper function to export traces from a single episode.\r\n Arrange all plots vertcially\"\"\"\r\n self.fig, self.axs = plt.subplots(nrows=len(channels)*len(streams),\r\n ncols=1, sharex=True)\r\n\r\n pcount = 0 # count plots\r\n\r\n for c in channels: # iterate over channels\r\n for s in streams: # iterate over streams\r\n self.axs[pcount].plot(self.data.table['time'],\r\n self.data.table[s+c],\r\n label=pcount, c=color)\r\n self.AddTraceScaleBar(xunit='ms', yunit=yunit_dict[s],\r\n ax=self.axs[pcount])\r\n position = [0, self.data.table[s+c][0]]\r\n text = '%.0f'%(position[1]) + yunit_dict[s]\r\n self.TextAnnotation(text=text, position=position,\r\n ax=self.axs[pcount], color=color,\r\n xoffset='-', yoffset=None, fontsize=None,\r\n ha='right',va='center')\r\n pcount += 1\r\n\r\n # Finally, annotate the episode information at the bottom\r\n pad = np.array(self.axs[-1].get_position().bounds[:2]) *\\\r\n np.array([1.0, 0.8])\r\n self.fig.text(pad[0], pad[1], self.data.meta['notes'],\r\n ha='left',va='bottom')\r\n\r\n @AdjustFigure(tight_layout=False)\r\n def MultipleTraces(self, window=None, color=color, channel='A',\r\n stream='Volt'):\r\n \"\"\"Helper function to draw multiple traces in a single axis\"\"\"\r\n # initialize figure\r\n self.fig, self.axs = plt.subplots(nrows=1, ncols=1)\r\n\r\n nep = len(self.data.table)\r\n\r\n # Draw plots\r\n for n in range(nep):\r\n x,y=self.data.table[n]['time'],self.data.table[n][stream+channel]\r\n ts = x[1] - x[0]\r\n if window is not None:\r\n x, y = x[int(window[0]/ts) : int(window[1]/ts)], \\\r\n y[int(window[0]/ts) : int(window[1]/ts)]\r\n\r\n self.axs.plot(x, y, label=n, c=color[n%nep])\r\n if n == (nep-1): # add trace bar for the last episode\r\n K.AddTraceScaleBar(xunit='ms', yunit=yunit_dict[stream],\r\n ax=self.axs)\r\n if n == 0: # annotate the first episode\r\n dataposition = [np.array(x)[0], np.array(y)[0]]\r\n datatext = '%.0f'%(dataposition[1]) + yunit_dict[stream]\r\n K.TextAnnotation(text=datatext, position=dataposition,\r\n ax=self.axs, color='k',xoffset='-',\r\n yoffset=None, fontsize=None, ha='right',\r\n va='center')\r\n\r\n # update the graph\r\n self.fig.canvas.draw()\r\n # Finally, annotate the episode information at the bottom\r\n pad = np.array(self.axs.get_position().bounds[:2])*np.array([1.0, 0.8])\r\n # gap between lines of text annotation in the bottom, in units of\r\n # figure (0,1)\r\n inc = 0.025\r\n\r\n for n in range(nep):\r\n # print(pad[0], pad[1]+inc*n)\r\n self.fig.text(pad[0], pad[1]-inc*n, self.data.meta['notes'][n],\r\n ha='left',va='bottom', color=color[n%len(color)])\r\n\r\n @AdjustFigure(canvas_size='none', tight_layout=False)\r\n def ConcatenatedTraces(self, color='k', channel='A',\r\n stream='Volt', gap=0.05):\r\n \"\"\"Heper function to export horizontally concatenated traces\r\n channel, stream: both string. Can only plot one data stream at a time.\r\n gap: gap between consecutive plots. gap * duration of plot. Default\r\n is 0.05, or 5% of the duration of the plot.\r\n \"\"\"\r\n # initialize figure\r\n self.fig, self.axs = plt.subplots(nrows=1, ncols=1)\r\n # initialize time shift\r\n x0 = 0.0\r\n nep = len(self.data.table) # number of data sets to plot\r\n # gap between lines of text annotation in the bottom, in units of\r\n # figure (0,1)\r\n inc = 0.025\r\n # Calculate the gap between plots: will probably result bottleneck\r\n gap *= max([x['time'].iloc[-1] - x['time'].iloc[0]\r\n for x in self.data.table])\r\n\r\n # Draw plots\r\n for n in range(nep):\r\n x,y=self.data.table[n]['time'],self.data.table[n][stream+channel]\r\n # update time shift\r\n x = x + x0\r\n x0 = x.iloc[-1] + gap\r\n\r\n self.axs.plot(x, y, label=n, c=color)\r\n\r\n if n == 0: # annotate the first episode\r\n dataposition = [np.array(x)[0], np.array(y)[0]]\r\n datatext = '%.0f'%(dataposition[1]) + yunit_dict[stream]\r\n self.TextAnnotation(text=datatext, position=dataposition,\r\n ax=self.axs, color='k',xoffset='-',\r\n yoffset=None, fontsize=None,ha='right',\r\n va='center')\r\n\r\n # add trace bar for the last episode\r\n self.AddTraceScaleBar(xunit='ms', yunit=yunit_dict[stream],\r\n ax=self.axs, xscale=x.iloc[-1]-x.iloc[0])\r\n # update the graph\r\n self.fig.canvas.draw()\r\n # Finally, annotate the episode information at the bottom\r\n pad = np.array(self.axs.get_position().bounds[:2])*np.array([1.0, 0.8])\r\n\r\n for n in range(nep):\r\n self.fig.text(pad[0], pad[1]-inc*n, self.data.meta['notes'][n],\r\n ha='left', va='bottom', color=color)\r\n\r\n # Set appropriate canvas size\r\n self.fig.set_size_inches(canvas_size[0]*nep,canvas_size[1])\r\n\r\n @AdjustFigure()\r\n def Scatter(self, color=color, marker=marker, alpha=0.5, legend_on=True):\r\n \"\"\"2D Scatter plot\r\n color = blue, magenta, purple, orange, green\r\n marker = circle, pentagon, pentagram star,star, + sign\r\n \"\"\"\r\n global _x, _y, _by\r\n self.fig, self.axs = plt.subplots(nrows=1,ncols=1)\r\n # Get number of groups\r\n group = np.unique(self.data.table[_by]) if _by is not None else [1]\r\n for n,gp in enumerate(group):\r\n label = self.get_field(self.data.meta,'legend',n)\r\n # select subset of data rows\r\n series = self.data.table[self.data.table[_by]==gp] \\\r\n if _by is not None else self.data.table\r\n # plot\r\n plt.scatter(series[_x], series[_y], alpha=alpha, s=50,\r\n marker=marker[n%len(marker)],\r\n color=color[n%len(color)],\r\n label=label[n] if label is not None else None)\r\n self.SetDefaultAxis()\r\n self.axs.set_xlabel(self.data.meta['xlabel'])\r\n self.axs.set_ylabel(self.data.meta['ylabel'])\r\n if legend_on and label is not None :\r\n self.axs.legend(bbox_to_anchor=(1.05, 1), loc=2, borderaxespad=0.)\r\n\r\n @AdjustFigure()\r\n def Scatter3D(self, color='k', marker=['.', '+', 'x', (5, 2), '4']):\r\n from mpl_toolkits.mplot3d import Axes3D # for 3D plots\r\n self.fig = plt.figure()\r\n self.axs = self.fig.add_subplot(111, projection='3d')\r\n color=list(color)\r\n global _x, _y, _z, _by\r\n # Get number of groups\r\n group = np.unique(self.data.table[_by]) if _by is not None else [1]\r\n for n,gp in enumerate(group):\r\n label = self.get_field(self.data.meta,'legend',n)\r\n # select subset of data rows\r\n series = self.data.table[self.data.table[_by]==gp] \\\r\n if _by is not None else self.data.table\r\n # plot\r\n self.axs.scatter(series[_x], series[_y], series[_z],\r\n label=label, zdir=u'z', s=144, #sqrt(s) point font\r\n c = color[n%len(color)],\r\n marker=marker[n%len(marker)],\r\n depthshade=True)\r\n self.axs.set_xlabel(self.data.meta['xlabel'])\r\n self.axs.set_ylabel(self.data.meta['ylabel'])\r\n self.axs.set_zlabel(self.data.meta['zlabel'])\r\n # Add annotations\r\n #self.AddRegions()\r\n self.SetDefaultAxis3D() # default axis, view, and distance\r\n\r\n @AdjustFigure()\r\n def BarPlot(self, style='Vertical', width=0.27, gap=0, space=0.25,\r\n color=color, hatch=None, alpha=0.4, linewidth=0):\r\n \"\"\"Plot bar graph\r\n style: style of bar graph, can choose 'Vertical' and 'Horizontal'\r\n width: width of bar. Default 0.27\r\n gap: space between bars. Default 0.\r\n space: distances between categories. Deafult 0.25\r\n \"\"\"\r\n global _x, _y, _z, _by\r\n # initialize plot\r\n self.fig, self.axs = plt.subplots(nrows=1,ncols=1, sharex=True)\r\n # Get number of groups\r\n group = np.unique(self.data.table[_by]) if _by is not None else [1]\r\n # Center of each category\r\n ns = len(group) # number of series\r\n inc = space+(ns-1)*gap+ns*width\r\n self.x = np.arange(0,len(np.unique(self.data.table[_x]))*inc,inc)\r\n # leftmost position of bars\r\n pos = self.x-ns/2*width - (ns-1)/2*gap\r\n\r\n for n,gp in enumerate(group):\r\n label = self.get_field(self.data.meta,'legend',n)\r\n # select subset of data rows\r\n series = self.data.table[self.data.table[_by]==gp] \\\r\n if _by is not None else self.data.table\r\n err = self.data.parse_errorbar(series) # get errorbar\r\n pos = pos if n==0 else pos+width+gap\r\n if style=='Vertical':\r\n bars = self.axs.bar(pos[:series.shape[0]], series[_y],\r\n width, yerr=err, alpha=alpha,\r\n align='center', label=label)\r\n else:\r\n bars = self.axs.barh(pos[:series.shape[0]], series[_y],\r\n width, yerr=err, alpha=alpha,\r\n align='center', label=label)\r\n # Set color\r\n self.SetColor(bars, color, n, linewidth)\r\n # Set hatch if available\r\n self.SetHatch(bars, hatch, n)\r\n self.SetDefaultAxis()\r\n if style=='Vertical':\r\n self.SetCategoricalXAxis()\r\n self.AdjustBarPlotXAxis()\r\n else: # horizontal\r\n self.AdjustBarPlotYAxis()\r\n self.SetCategoricalYAxis()\r\n # Set labels\r\n self.axs.set_xlabel(self.data.meta['xlabel'])\r\n self.axs.set_ylabel(self.data.meta['ylabel'])\r\n\r\n if n>0: # for multiple series, add legend\r\n self.axs.legend(bbox_to_anchor=(1.05, 1), loc=2, borderaxespad=0.)\r\n\r\n @AdjustFigure()\r\n def Boxplot(self, color=color):\r\n \"\"\"boxplot\"\"\"\r\n # initialize plot\r\n self.fig, self.axs = plt.subplots(nrows=1,ncols=1, sharex=True)\r\n self.x = [0,1]\r\n self.axs.boxplot(np.array(self.data.table[_y]).T)\r\n self.SetDefaultAxis()\r\n\r\n @AdjustFigure(canvas_size=None)\r\n def Beeswarm(self, style= \"swarm\",color=color, theme='cluster', **kwargs):\r\n \"\"\"Beeswarm style boxplot\r\n * style: beeswarm dot style,['swarm' (default),'hex','center','square']\r\n * theme: ['cluster' (Default), 'group', 'multi', 'floral'].\r\n Details see beeswarm doc string\r\n \"\"\"\r\n from simple.beeswarm import beeswarm\r\n global _x, _y, _by\r\n # initialize plot\r\n self.fig, self.axs = plt.subplots(nrows=1,ncols=1, sharex=True)\r\n # get some label parameters\r\n group = self.get_field(self.data.meta, 'group')\r\n legend = self.get_field(self.data.meta, 'legend')\r\n legendtitle = self.get_field(self.data.meta, 'legendtitle')\r\n # Do the plot\r\n self.axs, _ = beeswarm(_y, df=self.data.table, group=_x, cluster=_by,\\\r\n method=style,ax=self.axs, color=color,\\\r\n colortheme=theme, figsize=canvas_size,\\\r\n legend=legend, legendtitle=legendtitle, \\\r\n labels=group, **kwargs)\r\n\r\n # Format style\r\n # make sure axis tickmark points out\r\n self.axs.tick_params(axis='both',direction='out')\r\n self.axs.spines['right'].set_visible(False)\r\n self.axs.spines['top'].set_visible(False)\r\n self.axs.xaxis.set_ticks_position('bottom')\r\n self.axs.yaxis.set_ticks_position('left')\r\n # Set Y label, if exsit\r\n try:\r\n self.axs.set_ylabel(self.data.meta['ylabel'])\r\n except:\r\n pass\r\n\r\n @AdjustFigure()\r\n def Violinplot(self, color=color):\r\n \"\"\"violin plot / boxplot\"\"\"\r\n\r\n @AdjustFigure()\r\n def Histogram(self, style='Hstack'):\r\n \"\"\"Plot histogram\"\"\"\r\n n, bins, patches = P.hist(x, 50, normed=1, histtype='stepfilled')\r\n P.setp(patches, 'facecolor', 'g', 'alpha', 0.75)\r\n return\r\n\r\n def HistogramHstack(self):\r\n return\r\n\r\n @AdjustFigure()\r\n def LinePlot(self,style='Vstack',xtime='categorical',margins=(0.,0.25)):\r\n \"\"\"Line plots with errorbars\r\n style: ['Vstack' (default), 'Twin'] style of multiple subplots.\r\n - 'Vstack': vertically stacked subplots\r\n - 'Twin': can handle only up to 2 subplots\r\n xtime: used to plot time series with errorbars. Specify an array of\r\n time points.\r\n \"\"\"\r\n # set categorical x\r\n self.x = list(self.data.table.index) if xtime=='categorical' else xtime\r\n global _x, _y\r\n _y = [_y] if isinstance(_y, str) else _y\r\n if style=='Twin' and len(_y) == 2:\r\n self.LinePlotTwin()\r\n else:\r\n self.LinePlotVstack()\r\n # Must set margins before setting aspect ratio\r\n self.SetMargins(x=margins[0], y=margins[1])\r\n # Set aspect ratio\r\n self.SetAspectRatio(r=2, adjustable='box-forced', margins=margins)\r\n if xtime == 'categorical':\r\n self.SetCategoricalXAxis() # make some space for each category\r\n\r\n def LinePlotVstack(self):\r\n \"\"\" Line plots stacked vertically\"\"\"\r\n self.fig, self.axs = plt.subplots(nrows=len(_y), ncols=1, sharex=True)\r\n self.axs = np.array([self.axs]) if len(_y)<2 else self.axs\r\n err = self.data.parse_errorbar(simplify=False) # get errorbar\r\n for n, ax in enumerate(self.axs):\r\n # Plot error bar\r\n ax.errorbar(self.x,self.data.table[_y[n]], color='k',yerr = err[n])\r\n self.axs = self.axs[0] if len(_y)<2 else self.axs\r\n self.SetVstackAxis() # set vertical stacked subplot axes\r\n\r\n def LinePlotTwin(self, color=('k','r')):\r\n \"\"\" Line plots with 2 y-axis\"\"\"\r\n self.fig, self.axs = plt.subplots()\r\n # Must set label on the first axis in order to show up in the plot\r\n self.axs.set_xlabel(self.data.meta['xlabel'])\r\n # Construct another axis sharing xaxis with current axis\r\n self.axs = np.array([self.axs, self.axs.twinx()])\r\n err = self.data.parse_errorbar(simplify=False) # get error bar\r\n self.SetTwinPlotAxis(color=color) # set twin plot subplot axes\r\n for n, ax in enumerate(self.axs):\r\n # Plot error bar\r\n ax.errorbar(np.array(self.x), np.array(self.data.table[_y[n]]),\r\n color=color[n], yerr=err[n])\r\n\r\n \"\"\" ####################### Axis schemas ####################### \"\"\"\r\n @AdjustAxs()\r\n def SetDefaultAxis(ax):\r\n \"\"\"Set default axis appearance\"\"\"\r\n ax.tick_params(axis='both',direction='out')\r\n ax.spines['left'].set_visible(True)\r\n ax.spines['left'].set_capstyle('butt')\r\n ax.spines['right'].set_visible(False)\r\n ax.spines['top'].set_visible(False)\r\n ax.spines['bottom'].set_visible(True)\r\n ax.spines['bottom'].set_capstyle('butt')\r\n ax.xaxis.set_ticks_position('bottom')\r\n ax.yaxis.set_ticks_position('left')\r\n\r\n @AdjustAxs()\r\n def SetDefaultAxis3D(ax, elev=45, azim=60, dist=12):\r\n ax.tick_params(axis='both', direction='out')\r\n ax.view_init(elev=elev, azim=azim) # set perspective\r\n ax.dist = dist # use default axis distance 10\r\n if ax.azim > 0: # z axis will be on the left\r\n ax.zaxis.set_rotate_label(False) # prevent auto rotation\r\n a = ax.zaxis.label.get_rotation()\r\n ax.zaxis.label.set_rotation(90+a) # set custom rotation\r\n ax.invert_xaxis() # make sure (0,0) in front\r\n ax.invert_yaxis() # make sure (0,0) in front\r\n else:\r\n ax.invert_xaxis() # make sure (0,0) in front\r\n #ax.zaxis.label.set_color('red')\r\n #ax.yaxis._axinfo['label']['space_factor'] = 2.8\r\n\r\n @AdjustAxs()\r\n def TurnOffAxis(ax):\r\n \"\"\"Turn off all axis\"\"\"\r\n ax.spines['left'].set_visible(False)\r\n ax.spines['right'].set_visible(False)\r\n ax.spines['top'].set_visible(False)\r\n ax.spines['bottom'].set_visible(False)\r\n ax.xaxis.set_visible(False)\r\n ax.yaxis.set_visible(False)\r\n\r\n @AdjustAxs()\r\n def ShowOnlyXAxis(ax):\r\n \"\"\"Turn off all axis but only X axis\"\"\"\r\n ax.spines['left'].set_visible(False)\r\n ax.spines['right'].set_visible(False)\r\n ax.spines['top'].set_visible(False)\r\n ax.spines['bottom'].set_visible(True)\r\n ax.xaxis.set_visible(True)\r\n ax.yaxis.set_visible(False)\r\n\r\n @AdjustAxs()\r\n def ShowOnlyYAxis(ax):\r\n \"\"\"Turn off all axis but only Y axis\"\"\"\r\n ax.spines['left'].set_visible(True)\r\n ax.spines['right'].set_visible(False)\r\n ax.spines['top'].set_visible(False)\r\n ax.spines['bottom'].set_visible(False)\r\n ax.xaxis.set_visible(False)\r\n ax.yaxis.set_visible(True)\r\n\r\n def AdjustBarPlotXAxis(self):\r\n \"\"\"Adjust bar plot's x axis for categorical axis\"\"\"\r\n # get y axis extent\r\n ymin, ymax = self.axs.get_ybound()\r\n if ymax <= 0.0: # only negative data present\r\n # flip label to top\r\n self.axs.spines['bottom'].set_position('zero') # zero the x axis\r\n self.axs.tick_params(labelbottom=False, labeltop=True)\r\n elif ymin >= 0.0: # only positive data present. Default\r\n self.axs.spines['bottom'].set_position('zero') # zero the x axis\r\n else: # mix of positive an negative data : set all label to bottoms\r\n self.axs.spines['bottom'].set_visible(False)\r\n self.axs.spines['top'].set_visible(True)\r\n self.axs.spines['top'].set_position('zero')\r\n self.axs.xaxis.set_ticks_position('none')\r\n\r\n def AdjustBarPlotYAxis(self):\r\n \"\"\"Adjust bar plot's y axis for categorical axis\"\"\"\r\n #set all label to left\r\n self.axs.spines['left'].set_visible(False)\r\n self.axs.spines['right'].set_visible(True)\r\n self.axs.spines['right'].set_position('zero')\r\n self.axs.yaxis.set_ticks_position('none')\r\n\r\n def SetTwinPlotAxis(self, color=('k', 'r')):\r\n \"\"\"Axis style of 2 plots sharing y axis\"\"\"\r\n spineName = ('left','right')\r\n for n, ax in enumerate(self.axs): # For twin Plot\r\n ax.tick_params(axis='both',direction='out') # tick mark out\r\n ax.spines['top'].set_visible(False) # remove top boundary\r\n ax.xaxis.set_ticks_position('bottom') # keep only bottom ticks\r\n ax.set_ylabel(self.data.meta['ylabel'][n]) # set y label\r\n ax.yaxis.label.set_color(color[n]) # set y label color\r\n ax.tick_params(axis='y',color=color[n]) # set y tick color\r\n [tl.set_color(color[n]) for tl in ax.get_yticklabels()]\r\n ax.spines[spineName[n]].set_color(color[n]) # set y spine color\r\n self.axs[0].spines['right'].set_visible(False) # leave only left spine\r\n self.axs[1].spines['left'].set_visible(False) # only only right spine\r\n\r\n @AdjustAxs()\r\n def PadY(ax):\r\n \"\"\"Set extra padding if data points / lines are cut off\"\"\"\r\n arr = np.array([l.get_ydata() for l in ax.lines])\r\n MAX, MIN = np.max(arr), np.min(arr)\r\n ytick_arr = ax.get_yticks()\r\n inc = np.mean(np.diff(ytick_arr)) # extra padding\r\n if np.min(ytick_arr)>=MIN:\r\n ax.set_ylim(MIN-inc, ax.get_ylim()[-1])\r\n if np.max(ytick_arr)<=MAX:\r\n ax.set_ylim(ax.get_ylim()[0], MAX+inc)\r\n\r\n def SetVstackAxis(self):\r\n \"\"\"Axis style of vertically stacked subplots\"\"\"\r\n def SVsA(ax, n):\r\n ax.tick_params(axis='both', direction='out') #tick mark out\r\n ax.spines['top'].set_visible(False) # remove top boundary\r\n ax.spines['right'].set_visible(False) # remove right spine\r\n ax.yaxis.set_ticks_position('left') # keep only left ticks\r\n ax.set_ylabel(self.data.meta['ylabel'][n]) # set different y labels\r\n if ax.is_last_row(): #keep only bottom ticks\r\n ax.xaxis.set_ticks_position('bottom')\r\n ax.set_xlabel(self.data.meta['xlabel']) # x label\r\n else:\r\n ax.xaxis.set_visible(False)\r\n ax.spines['bottom'].set_visible(False)\r\n SVsA_vec = np.frompyfunc(SVsA,2,1)\r\n num_axs = len(self.axs) if isinstance(self.axs, np.ndarray) else 1\r\n SVsA_vec(self.axs, range(num_axs))\r\n self.fig.tight_layout(h_pad=0.01) # pad height\r\n\r\n def SetHstackAxis(self):\r\n \"\"\"Axis style of horizontally stacked / concatenated subplots\"\"\"\r\n def SHsA(ax, n):\r\n ax.tick_params(axis='both', direction='out') # tick mark out\r\n ax.spine['top'].set_visible(False) #remove top boundary\r\n ax.spine['right'].set_visible(False) # remove right spine\r\n ax.yaxis.set_ticks_position('left') # keep only left ticks\r\n ax.set_xlabel(self.data.meta['xlabel'][n]) # set different x labels\r\n if ax.is_first_col(): # keep only first ticks\r\n ax.yaxis.set_ticks_position('left')\r\n ax.set_ylabel(self.data.meta['ylabel'][0]) # y label\r\n else:\r\n ax.yaxis.set_visible(False)\r\n ax.spines['left'].set_visible(False)\r\n SHsA_vec = np.frompyfunc(SHsA,2,1)\r\n num_axs = len(self.axs) if isinstance(self.axs, np.ndarray) else 1\r\n SHsA_vec(self.axs, range(num_axs))\r\n self.fig.tight_layout(w_pad=0.01) # pad width\r\n\r\n def SetCategoricalXAxis(self, ax=None):\r\n \"\"\"Additional settings for plots with categorical data\"\"\"\r\n # change the x lim on the last, most buttom subplot\r\n if ax is None: # last axis, or self.axs is a single axis\r\n ax = self.axs[-1] if isinstance(self.axs, np.ndarray) else self.axs\r\n if ax.get_xlim()[0] >= self.x[0]:\r\n ax.set_xlim(ax.get_xticks()[0]-1,ax.get_xlim()[-1])\r\n if ax.get_xlim()[-1] <= self.x[-1]:\r\n ax.set_xlim(ax.get_xlim()[0], ax.get_xticks()[-1]+1)\r\n plt.xticks(self.x, np.unique(self.data.table[_x]))\r\n\r\n def SetCategoricalYAxis(self, ax=None):\r\n \"\"\"Additional settings for plots with categorical data\"\"\"\r\n if ax is None: # last axis, or self.axs is a single axis\r\n ax = self.axs[-1] if isinstance(self.axs, np.ndarray) else self.axs\r\n if ax.get_ylim()[0] >= self.x[0]:\r\n ax.set_ylim(ax.get_yticks()[0]-0.5,ax.get_ylim()[-1])\r\n if ax.get_ylim()[-1] <= self.x[-1]:\r\n ax.set_ylim(ax.get_ylim()[0], ax.get_yticks()[-1]+0.5)\r\n plt.yticks(self.x, np.unique(self.data.table[_x]))\r\n\r\n def SetDiscontinousAxis(self, x=None, y=None):\r\n \"\"\"Plot with discontious axis. Allows one discontinuity for each axis.\r\n Assume there is only 1 plot in the figure\r\n x: x break point\r\n y: y break point\r\n \"\"\"\r\n raise(NotImplementedError('This method is yet to be implemeneted'))\r\n if x is not None and y is not None:\r\n f,axs = plt.subplots(2,2,sharex=True,sharey=True)\r\n elif x is not None and y is None:\r\n f,axs = plt.subplots(2,1,sharey=True)\r\n elif x is None and y is not None:\r\n f,axs = plt.subplots(2,1,sharex=True)\r\n line = self.axs.get_lines()\r\n # plot the same data in all the subplots\r\n [ax.add_line(line) for ax in axs]\r\n # set axis\r\n self.SetVstackAxis() # set vertical stacked subplot axes\r\n for ax in self.axs:\r\n if not ax.is_first_col:\r\n ax.yaxis.set_visible(False)\r\n ax.spines['left'].set_visible(False)\r\n # add slashes between two plots\r\n\r\n def SetAxisOrigin(self, ax=None, xcenter=0, ycenter=0):\r\n if ax is None:\r\n ax = self.axs[-1] if isinstance(self.axs, np.ndarray) else self.axs\r\n ax.spines['left'].set_position(('data', xcenter))\r\n ax.spines['bottom'].set_position(('data', ycenter))\r\n ax.spines['right'].set_visible(False)\r\n ax.spines['top'].set_visible(False)\r\n ax.xaxis.set_ticks_position('bottom')\r\n ax.yaxis.set_ticks_position('left')\r\n ax.spines['left'].set_capstyle('butt')\r\n ax.spines['bottom'].set_capstyle('butt')\r\n\r\n @AdjustAxs(excluded=['margins'])\r\n def SetAspectRatio(ax, r=2, adjustable='box-forced',margins=(0,0)):\r\n \"\"\"Set aspect ratio of the plots, across all axes.\r\n Must set margins before calling this function to set aspect\r\n ratios.\r\n r: ratio in data domains\r\n adjustable: see 'adjustable' argument for axes.set_aspect\r\n margins: account extra margins when setting aspect ratio.\r\n Default is (0,0)\r\n \"\"\"\r\n if not isinstance(r, str):\r\n dX = np.diff(ax.get_xlim())/(1+2*margins[0])\r\n dY = np.diff(ax.get_ylim())/(1+2*margins[1])\r\n aspect = dX/dY/r\r\n else:\r\n aspect = r\r\n ax.set_aspect(aspect=aspect, adjustable=adjustable)\r\n\r\n @AdjustAxs()\r\n def SetMargins(ax, x=0.25, y=0.25):\r\n \"\"\"Wrapper for setting margins\"\"\"\r\n ax.margins(x,y)\r\n\r\n def SetColor(self, plotobj, color, n, linewidth=0):\r\n \"\"\"Set colors. Would allow optionally turn off color\"\"\"\r\n if color is not None:\r\n for p in plotobj:\r\n p.set_color(color[n%len(color)])\r\n p.set_linewidth(linewidth)\r\n else:\r\n for p in plotobj:\r\n p.set_color('w')\r\n p.set_edgecolor('k')\r\n p.set_linewidth(1)\r\n\r\n def SetHatch(self, plotobj, hatch, n):\r\n \"\"\"Set hatch for patchs\"\"\"\r\n if hatch is not None:\r\n for p in plotobj:\r\n p.set_hatch(hatch[n%len(hatch)])\r\n\r\n \"\"\" #################### Text Annotations ####################### \"\"\"\r\n def AddTraceScaleBar(self, xunit, yunit, color='k',linewidth=None,\\\r\n fontsize=None, ax=None, xscale=None, yscale=None):\r\n \"\"\"Add scale bar on trace. Specifically designed for voltage /\r\n current / stimulus vs. time traces.\r\n xscale, yscale: add the trace bar to the specified window of x and y.\r\n \"\"\"\r\n if ax is None: ax=self.axs\r\n def scalebarlabel(x, unitstr):\r\n x = int(x)\r\n if unitstr.lower()[0] == 'm':\r\n return(str(x)+unitstr if x<1000 else str(int(x/1000))+\r\n unitstr.replace('m',''))\r\n elif unitstr.lower()[0] == 'p':\r\n return(str(x)+unitstr if x<1000 else str(int(x/1000))+\r\n unitstr.replace('p','n'))\r\n \r\n ax.set_axis_off() # turn off axis\r\n X = np.ptp(ax.get_xlim()) if xscale is None else xscale\r\n Y = np.ptp(ax.get_ylim()) if yscale is None else yscale\r\n # calculate scale bar unit length\r\n X, Y = self.roundto125(X/5), self.roundto125(Y/5)\r\n # Parse scale bar labels\r\n xlab, ylab = scalebarlabel(X, xunit), scalebarlabel(Y, yunit)\r\n # Get color of the scalebar\r\n if color is None:\r\n color = ax.get_lines()[0]\r\n if 'matplotlib.lines.Line2D' in str(type(color)):\r\n color = color.get_color()\r\n if linewidth is None:\r\n try:\r\n linewidth = ax.get_lines()[0]\r\n except:\r\n raise(AttributeError('Did not find any line in this axis. Please explicitly specify the linewidth'))\r\n if 'matplotlib.lines.Line2D' in str(type(linewidth)):\r\n linewidth = linewidth.get_linewidth()\r\n if fontsize is None:\r\n fontsize = ax.yaxis.get_major_ticks()[2].label.get_fontsize()\r\n # Calculate position of the scale bar\r\n # xi = np.max(ax.get_xlim()) + X/2.0\r\n # yi = np.mean(ax.get_ylim())\r\n # calculate position of text\r\n # xtext1, ytext1 = xi+X/2.0, yi-Y/10.0 # horizontal\r\n # xtext2, ytext2 = xi+X+X/10.0, yi+Y/2.0 # vertical\r\n # Draw the scalebar \r\n box1 = AuxTransformBox(ax.transData)\r\n box1.add_artist(plt.Rectangle((0,0),X, 0, fc=\"none\"))\r\n box2 = TextArea(xlab, minimumdescent=False, textprops=dict(color=color))\r\n boxh = VPacker(children=[box1,box2], align=\"center\", pad=0, sep=2)\r\n box3 = AuxTransformBox(ax.transData)\r\n box3.add_artist(plt.Rectangle((0,0),0,Y, fc=\"none\"))\r\n box4 = TextArea(ylab, minimumdescent=False, textprops=dict(color=color))\r\n box5 = VPacker(children=[box3, boxh], align=\"right\", pad=0, sep=0)\r\n box = HPacker(children=[box5,box4], align=\"center\", pad=0, sep=2)\r\n anchored_box = AnchoredOffsetbox(loc=5, pad=-9, child=box, frameon=False)\r\n ax.add_artist(anchored_box)\r\n return(anchored_box)\r\n\r\n def TextAnnotation(self, text=\"\", position='south', ax=None, xoffset=None,\r\n yoffset=None, color='k', fontsize=None, **kwargs):\r\n \"\"\"Annotating with text\r\n color: color of the text and traces. Default 'k'. If None, use the same\r\n color of the trace\r\n xoffset: the amount of space in horizontal direction, e.g. text around\r\n traces.\r\n ~ If None, no offsets.\r\n ~ If \"+\", add a space of a single character to x position.\r\n The number of \"+\"s in the argument indicates the number of\r\n times that the single character space will be added.\r\n ~ If \"-\", subtract a space of single character to x position.\r\n Rule for multiple \"-\"s is the same for \"+\"\r\n ~ If a number, add this number to x position\r\n yoffset: the amount of space in vertical direction, e.g. between lines\r\n of text. The yoffset is applied the same as xoffset, only to\r\n y position\r\n fontsize: size of the font. Default None, use the same font size as\r\n the x tick labels\r\n **kwargs: additional argument for ax.text\r\n \"\"\"\r\n ax = self.axs if ax is None else ax\r\n\r\n if isinstance(position, str):\r\n # get axis parameter\r\n X, Y = np.ptp(ax.get_xticks()), np.ptp(ax.get_yticks())\r\n xytext = {\r\n 'north': (np.mean(ax.get_xticks()),np.max(ax.get_yticks())+Y/10.0),\r\n 'south': (np.mean(ax.get_xticks()),np.min(ax.get_yticks())-Y/10.0),\r\n 'east': (np.max(ax.get_xticks())+X/10.0,np.mean(ax.get_yticks())),\r\n 'west': (np.min(ax.get_xticks())-X/10.0,np.mean(ax.get_yticks())),\r\n 'northeast':(np.max(ax.get_xticks())+X/10.0,\r\n np.max(ax.get_yticks()) + Y/10.0),\r\n 'northwest':(np.min(ax.get_xticks())-X/10.0,\r\n np.max(ax.get_yticks()) + Y/10.0),\r\n 'southeast':(np.max(ax.get_xticks())+X/10.0,\r\n np.min(ax.get_yticks()) - Y/10.0),\r\n 'southwest':(np.max(ax.get_xticks())-X/10.0,\r\n np.min(ax.get_yticks()) - Y/10.0)\r\n }.get(position,ValueError('Unrecognized position %s'%position))\r\n if isinstance(xytext, Exception):\r\n raise(xytext)\r\n else: # assume numeric\r\n xytext = position\r\n\r\n if fontsize is None:\r\n fontsize = ax.yaxis.get_major_ticks()[2].label.get_fontsize()\r\n\r\n def calloffset(offset, ind): # xy offset modification\r\n if offset is not None:\r\n if '+' in offset:\r\n xytext[ind]+=offset.count('+')*self.xydotsize(ax,\r\n s=fontsize,scale=(1.,1.))[ind]\r\n elif '-' in offset:\r\n xytext[ind]-=offset.count('-')*self.xydotsize(ax,\r\n s=fontsize,scale=(1.,1.))[ind]\r\n else:\r\n try:\r\n xytext[ind] += float(offset)\r\n except:\r\n pass\r\n return(xytext)\r\n\r\n xytext = calloffset(xoffset, 0)\r\n xytext = calloffset(yoffset, 1)\r\n\r\n if color is None:\r\n color = ax.get_lines()[0]\r\n if 'matplotlib.lines.Line2D' in str(type(color)):\r\n color = color.get_color()\r\n\r\n txt = ax.text(xytext[0], xytext[1], text,color=color, size=fontsize,\r\n **kwargs)\r\n self.AdjustText(txt, ax=ax)\r\n\r\n def AnnotateOnGroup(self, m, text='*', vpos=None):\r\n \"\"\"Help annotate statistical significance over each group in Beeswarm /\r\n bar graph. Annotate several groups at a time.\r\n m: indices of the group. This is required.\r\n text: text annotation above the group. Default is an asterisk '*'\r\n vpos: y value of the text annotation, where text is positioned. Default\r\n is calculated by this method. For aesthetic reason, vpos will be\r\n applied to all groups specified in m\r\n \"\"\"\r\n # Calculate default value of vpos\r\n if vpos is None:\r\n I = self.axs.get_yticks()\r\n yinc = I[1]-I[0] # y tick increment\r\n Y = [max(x) for x in self.data.series['y']]\r\n vpos = max([Y[k] for k in m])+yinc/5.0\r\n X = self.axs.get_xticks()\r\n for k in m:\r\n txt = self.axs.text(X[k], vpos, text, ha='center',va='top')\r\n # adjust text so that it is not overlapping with data or title\r\n self.AdjustText(txt)\r\n\r\n def AnnotateBetweenGroups(self, m=0, n=1, text='*', hgap=0):\r\n \"\"\"Help annotate statistical significance between two groups in the\r\n Beeswarm / bar plot. Annotate one pair at a time.\r\n m, n indicates the index of the loci to annotate between.\r\n By default, m=0 (first category), and n=1 (second category)\r\n text: annotation text above the bracket between the two loci.\r\n Default is an asterisk \"*\" to indicate significance.\r\n hgap: horizontal gap between neighboring annotations. Default is 0.\r\n No gap will be added at m=0 or n=1\r\n All annotations are stored in self.axs.texts, a list of text objects.\r\n \"\"\"\r\n # Calculate Locus Position\r\n X = self.axs.get_xticks()\r\n I = self.axs.get_yticks()\r\n Y = [max(x) for x in self.data.table[_y]]\r\n yinc = I[1]-I[0] # y tick increment\r\n ytop = max(I) + yinc/10.0 # top of the annotation\r\n yoffset = (ytop-max(Y[m],Y[n]))/2.0\r\n # position of annotation bracket\r\n xa, xb = X[m]+hgap*int(m!=0), X[n]-hgap*int(n!=max(X))\r\n ya, yb = yoffset + Y[m], yoffset + Y[n]\r\n # position of annotation text\r\n xtext, ytext = (X[m]+X[n])/2.0, ytop+yinc/10.0\r\n # Draw text\r\n txt = self.axs.text(xtext,ytext, text, ha='center',va='bottom')\r\n # adjust text so that it is not overlapping with data or title\r\n self.AdjustText(txt)\r\n # Draw Bracket\r\n self.axs.annotate(\"\", xy=(xa,ya), xycoords='data',\r\n xytext=(xa,ytop), textcoords = 'data',\r\n annotation_clip=False,arrowprops=dict(arrowstyle=\"-\",\r\n connectionstyle=\"arc3\", shrinkA=0, shrinkB=0))\r\n self.axs.annotate(\"\", xy=(xa,ytop), xycoords='data',\r\n xytext=(xb,ytop), textcoords = 'data',\r\n annotation_clip=False,arrowprops=dict(arrowstyle=\"-\",\r\n connectionstyle=\"arc3\", shrinkA=0, shrinkB=0))\r\n self.axs.annotate(\"\", xy=(xb,ytop), xycoords='data',\r\n xytext=(xb,yb), textcoords = 'data',\r\n annotation_clip=False,arrowprops=dict(arrowstyle=\"-\",\r\n connectionstyle=\"arc3\", shrinkA=0, shrinkB=0))\r\n\r\n def AdjustText(self, txt, ax=None):\r\n \"\"\"Adjust text so that it is not being cutoff\"\"\"\r\n #renderer = self.axs.get_renderer_cache()\r\n if ax is None: ax = self.axs\r\n txt.set_bbox(dict(facecolor='w', alpha=0, boxstyle='round, pad=1'))\r\n plt.draw() # update the text draw\r\n txtbb = txt.get_bbox_patch().get_window_extent() # can specify render\r\n xmin, ymin, xmax, ymax = tuple(ax.transData.inverted().\r\n transform(txtbb).ravel())\r\n xbnd, ybnd = ax.get_xbound(), ax.get_ybound()\r\n if xmax > xbnd[-1]:\r\n ax.set_xbound(xbnd[0], xmax)\r\n if xmin < xbnd[0]:\r\n ax.set_xbound(xmin, xbnd[-1])\r\n if ymax > ybnd[-1]:\r\n ax.set_ybound(ybnd[0], ymax)\r\n if ymin < ybnd[0]:\r\n ax.set_ybound(ymin, ybnd[-1])\r\n\r\n def RemoveAnnotation(self):\r\n \"\"\"Remove all annotation and start over\"\"\"\r\n self.axs.texts = []\r\n\r\n\r\n \"\"\" ################# Geometry Annotations ####################### \"\"\"\r\n def DrawEllipsoid(self, center, radii, rvec=np.eye(3), \\\r\n numgrid=100, ax=None, color=color, alpha=0.6):\r\n \"\"\"Draw an ellipsoid given its parameters\r\n center: center [x0,y0,z0]\r\n radii: radii of the ellipsoid [rx, ry, rz]\r\n rvec: vector of the radii that indicates orientation. Default identity\r\n numgrid: number of points to estimate the ellipsoid. The higher the\r\n number, the smoother the plot. Defualt 100.\r\n \"\"\"\r\n # Caculate ellipsoid coordinates\r\n from simple.ellipsoid import Ellipsoid as Ellipsoid\r\n x,y,z = Ellipsoid(center, radii, rvec, numgrid)\r\n if ax is None:\r\n if not (isinstance(self.axs, np.ndarray) or \\\r\n isinstance(self.axs, list)):\r\n ax = self.axs # only 1 axis\r\n else:\r\n return\r\n ax.plot_surface(x,y,z,rstride=4,cstride=4,linewidth=0,\\\r\n alpha=alpha,color=color[self.cache%len(color)])\r\n self.cache += 1 # increase color cache index by 1\r\n\r\n def DrawRect(self, x,y,w,h, ax=None, color=color, alpha=0.6):\r\n \"\"\"Draw a rectangular bar\r\n x,y,w,h: xcenter, ycenter, width, height\r\n \"\"\"\r\n from matplotlib.patches import Rectangle\r\n if ax is None:\r\n if not (isinstance(self.axs, np.ndarray) or \\\r\n isinstance(self.axs, list)):\r\n ax = self.axs # only 1 axis\r\n else:\r\n return\r\n ax.add_patch(Rectangle((x-w/2.0, y-h/2.0), w, h, angle=0.0, \\\r\n facecolor=color[self.cache%len(color)]))\r\n self.caceh += 1 # increase color cache index by 1\r\n # Send the patch to the background, but right above the previous patch\r\n self.SetZOrder(style='overlay')\r\n\r\n def SetZOrder(self, plotobj=None, style=None, order=None):\r\n \"\"\"Organizing layers of plot. This is helpful when exporting to .eps\"\"\"\r\n raise(NotImplementedError('This mehtod is not implemented'))\r\n if style == 'back': # send the layer all the way back\r\n None\r\n elif style == 'front': # send the layer all the way front\r\n None\r\n elif style == 'overlay':\r\n # First identify the same type of objects starting from the bottom,\r\n # then put the current object on top of the top-most found object\r\n patch_order = 0\r\n self.SetZOrder(plotobj, style=None, order=patch_order)\r\n if order>0: # send the plot object 1 layer forward\r\n None\r\n elif order<0: # send the plot object 1 layer backward\r\n None\r\n\r\n \"\"\" ####################### Misc ####################### \"\"\"\r\n @staticmethod\r\n def roundto125(x, r=np.array([1,2,5,10])): # helper static function\r\n \"\"\"5ms, 10ms, 20ms, 50ms, 100ms, 200ms, 500ms, 1s, 2s, 5s, etc.\r\n 5mV, 10mV, 20mV, etc.\r\n 5pA, 10pA, 20pA, 50pA, etc.\"\"\"\r\n p = int(np.floor(np.log10(x))) # power of 10\r\n y = r[(np.abs(r-x/(10**p))).argmin()] # find closest value\r\n return(y*(10**p))\r\n\r\n @staticmethod\r\n def get_field(struct, *args): # layered /serial indexing\r\n try:\r\n for m in args:\r\n struct = struct[m]\r\n return(struct)\r\n except:\r\n return(None)\r\n\r\n @staticmethod\r\n def ind2sub(ind, size, order='C'):\r\n \"\"\"MATLAB's ind2usb\r\n order: in 'C' order by default\"\"\"\r\n return(np.unravel_index(ind, size,order=order))\r\n\r\n @staticmethod\r\n def sub2ind(sub, size, order='C'):\r\n \"\"\"MATLAB's sub2ind\r\n order: in 'C' order by default\"\"\"\r\n return(np.ravel_multi_index(sub, dims=size, order=order))\r\n\r\n @staticmethod\r\n def xydotsize(ax, s=None, dpi=None, scale=(1.25,1.25)):\r\n \"\"\" Determine dot size in data axis.\r\n scale: helps further increasing space between dots\r\n \"\"\"\r\n figw, figh = ax.get_figure().get_size_inches() # figure width, height in inch\r\n dpi = float(ax.get_figure().get_dpi()) if dpi is None else float(dpi)\r\n w = (ax.get_position().xmax-ax.get_position().xmin)*figw # axis width in inch\r\n h = (ax.get_position().ymax-ax.get_position().ymin)*figh # axis height in inch\r\n xran = ax.get_xlim()[1]-ax.get_xlim()[0] # axis width in data\r\n yran = ax.get_ylim()[1]-ax.get_ylim()[0] # axis height in data\r\n if s is None:\r\n xsize=0.08*xran/w*scale[0] # xscale * proportion of xwidth in data\r\n ysize=0.08*yran/h*scale[1] # yscale * proportion of yheight in data\r\n else:\r\n xsize=np.sqrt(s)/dpi*xran/w*scale[0] # xscale * proportion of xwidth in data\r\n ysize=np.sqrt(s)/dpi*yran/h*scale[1] # yscale * proportion of yheight in data\r\n\r\n return(xsize, ysize)\r\n\r\n\r\n def SetFont(ax, fig, fontsize=12,fontname='Arial',items=None):\r\n \"\"\"Change font properties of all axes\r\n ax: which axis or axes to change the font. Default all axis in current\r\n instance. To skip axis, input as [].\r\n fig: figure handle to change the font (text in figure, not in axis).\r\n Default is any text items in current instance. To skip, input as [].\r\n fontsize: size of the font, specified in the global variable\r\n fontname: fullpath of the font, specified in the global variable\r\n items: select a list of items to change font. ['title', 'xlab','ylab',\r\n 'xtick','ytick', 'texts','legend','legendtitle','textartist']\r\n \"\"\"\r\n def unpack_anchor_offsetbox(box):\r\n \"\"\"Getting only text area items from the anchor offset box\"\"\"\r\n itemList = []\r\n counter = 0\r\n maxiter=100 # terminate at this iteration\r\n def unpacker(box):\r\n return box.get_children()\r\n \r\n # vectorize\r\n unpacker = np.frompyfunc(unpacker, 1,1)\r\n # Get the children\r\n while counter<maxiter and box:\r\n # recursively unpack the anchoroffsetbox or v/hpacker\r\n box = np.hstack(unpacker(box)).tolist()\r\n for nn, b in enumerate(box):\r\n if 'matplotlib.text.Text' in str(type(b)):\r\n itemList.append(b)\r\n box[nn] = None\r\n # remove recorded\r\n box = [b for b in box if b is not None]\r\n counter += 1\r\n \r\n return itemList\r\n \r\n def get_ax_items(ax):\r\n \"\"\"Parse axis items\"\"\"\r\n itemDict={'title':[ax.title], 'xlab':[ax.xaxis.label],\r\n 'ylab':[ax.yaxis.label], 'xtick':ax.get_xticklabels(),\r\n 'ytick':ax.get_yticklabels(),\r\n 'texts':ax.texts if isinstance(ax.texts,(np.ndarray,list))\r\n else [ax.texts],\r\n 'legend': [] if not ax.legend_\r\n else ax.legend_.get_texts(),\r\n 'legendtitle':[] if not ax.legend_\r\n else [ax.legend_.get_title()], \r\n 'textartist':[] if not ax.artists\r\n else unpack_anchor_offsetbox(ax.artists)}\r\n itemList, keyList = [], []\r\n if items is None: # get all items\r\n for k, v in iter(itemDict.items()):\r\n itemList += v\r\n keyList += [k]*len(v)\r\n else: # get only specified item\r\n for k in items:\r\n itemList += itemDict[k] # add only specified in items\r\n keyList += [k]*len(itemDict[k])\r\n\r\n return(itemList, keyList)\r\n\r\n def get_fig_items(fig):\r\n \"\"\"Parse figure text items\"\"\"\r\n itemList = fig.texts if isinstance(fig.texts,(np.ndarray,list)) \\\r\n else [fig.texts]\r\n keyList = ['texts'] * len(itemList)\r\n\r\n return(itemList, keyList)\r\n \r\n def CF(itemList, keyList):\r\n \"\"\"Change font given item\"\"\"\r\n # initialize fontprop object\r\n fontprop = fm.FontProperties(style='normal', weight='normal',\r\n stretch = 'normal')\r\n if os.path.isfile(fontname): # check if font is a file\r\n fontprop.set_file(fontname)\r\n else:# check if the name of font is available in the system\r\n if not any([fontname.lower() in a.lower() for a in\r\n fm.findSystemFonts(fontpaths=None, fontext='ttf')]):\r\n print('Cannot find specified font: %s' %(fontname))\r\n fontprop.set_family(fontname) # set font name\r\n # set font for each object\r\n for n, item in enumerate(itemList):\r\n if isinstance(fontsize, dict):\r\n fontprop.set_size(fontsize[keyList[n]])\r\n elif n <1: # set the properties only once\r\n fontprop.set_size(fontsize)\r\n item.set_fontproperties(fontprop) # change font for all items\r\n\r\n\r\n def CF_ax(ax): # combine CF and get_ax_items\r\n if not ax: # true when empty or None\r\n return # skip axis font change\r\n itemList, keyList = get_ax_items(ax)\r\n CF(itemList, keyList)\r\n\r\n def CF_fig(fig): # combine CF and get_fig_items\r\n if not fig: # true when empty or None\r\n return # skip figure font change\r\n itemsList, keyList = get_fig_items(fig)\r\n CF(itemsList, keyList)\r\n\r\n # vecotirze the closure\r\n CF_ax_vec = np.frompyfunc(CF_ax, 1,1)\r\n CF_fig_vec = np.frompyfunc(CF_fig, 1,1)\r\n\r\n # Do the actual font change\r\n CF_ax_vec(ax)\r\n CF_fig_vec(fig)\r\n\r\nif __name__ == \"__main__\":\r\n dataFile = os.path.join(exampleFolder, '%s.csv' %plotType)\r\n # Load data\r\n if plotType != 'neuro':\r\n K = PublicationFigures(dataFile=dataFile, savePath=os.path.join(exampleFolder,'%s.png'%plotType))\r\n if plotType == 'lineplot':\r\n # Line plot example\r\n K.LinePlot(style=style)\r\n #K.axs[0].set_ylim([0.5,1.5])\r\n #K.axs[1].set_ylim([0.05, 0.25])\r\n elif plotType == 'boxplot':\r\n # boxplot example\r\n K.Boxplot()\r\n elif plotType == 'beeswarm':\r\n # Beeswarm example\r\n K.Beeswarm()\r\n #K.AnnotateOnGroup(m=[0,1])\r\n #K.AnnotateBetweenGroups(text='p=0.01234')\r\n elif plotType == 'trace':\r\n # Time series example\r\n K.Traces()\r\n elif plotType == 'barplot':\r\n K.BarPlot(style='Vertical')\r\n elif plotType == 'scatter':\r\n K.Scatter()\r\n elif plotType == 'scatter3d':\r\n K.Scatter3D()\r\n elif plotType == 'boxplot':\r\n K.Boxplot()\r\n elif plotType == 'neuro':\r\n #base_dir = 'D:/Data/2015/08.August/Data 25 Aug 2015/Neocortex B.25Aug15.S1.E%d.dat'\r\n #eps = [14, 22, 29, 39] # AHP\r\n #data = [base_dir%(epi) for epi in eps]\r\n #K = PublicationFigures(dataFile=data, savePath=os.path.join(exampleFolder,'multiple_traces.png'), old=True, channels=['A'], streams=['Volt'])\r\n #K.Traces(outline='overlap')\r\n if style == 'single':\r\n data = 'D:/Data/2015/07.July/Data 10 Jul 2015/Neocortex K.10Jul15.S1.E38.dat'\r\n K = PublicationFigures(dataFile=data, savePath=os.path.join(exampleFolder,'single_episode_traces.png'), old=True, channels=['A'], streams=['Volt', 'Cur'])\r\n K.SingleEpisodeTraces(K.data.table, notes=K.data.meta['notes'][0], channels=['A'], streams=['Volt','Cur'])\r\n elif style == 'multiple':\r\n base_dir = 'D:/Data/Traces/2015/10.October/Data 21 Oct 2015/Neocortex C.21Oct15.S1.E%d.dat'\r\n result_dir = 'C:/Users/Edward/Documents/Assignments/Case Western Reserve/StrowbridgeLab/Projects/TeA Persistence Cui and Strowbridge 2015/analysis/Self termination with stimulation - 10222015/example.eps'\r\n result_dir = 'C:/Users/Edward/Desktop/asdf.svg'\r\n eps = range(53, 58, 1)\r\n data = [base_dir %(epi) for epi in eps]\r\n K = PublicationFigures(dataFile=data, savePath=result_dir, old=True, channels=['A'], streams=['Volt'] )\r\n K.MultipleTraces(channel='A', stream='Volt', window=[2000, 4000])\r\n elif style == 'concatenated':\r\n base_dir = 'D:/Data/Traces/2015/06.June/Data 17 Jun 2015/Neocortex H.17Jun15.S1.E%d.dat'\r\n result_dir = 'C:/Users/Edward/Desktop/concatplot.svg'\r\n eps = np.arange(150, 160, 1)\r\n data = [base_dir %(epi) for epi in eps]\r\n K = PublicationFigures(dataFile=data, savePath=result_dir, old=True, channels=['A'], streams=['Volt'] )\r\n K.ConcatenatedTraces(channel='A',stream='Volt')\r\n\r\n # Final clean up\r\n #K.fig.show()\r\n K.Save()\r\n"
},
{
"alpha_fraction": 0.5888993144035339,
"alphanum_fraction": 0.604891836643219,
"avg_line_length": 26.026315689086914,
"blob_id": "47bc41be31b7a783707a199a50c0ec1e4a89fca7",
"content_id": "ca063bb122dbdde615c43990eac8dcc6bd39d9ba",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 1063,
"license_type": "no_license",
"max_line_length": 74,
"num_lines": 38,
"path": "/Plots/simple/center_spine.py",
"repo_name": "sapphire008/Python",
"src_encoding": "UTF-8",
"text": "# -*- coding: utf-8 -*-\r\n\"\"\"\r\nCreated on Fri Mar 4 02:49:09 2016\r\n\r\n@author: Edward\r\n\"\"\"\r\nimport matplotlib.pyplot as plt\r\nimport numpy as np\r\n\r\n\r\n\r\ndef center_spines(ax=None, centerx=0, centery=0, endarrow=False):\r\n \"\"\"Centers the axis spines at <centerx, centery> on the axis \"ax\", and\r\n places arrows at the end of the axis spines.\"\"\"\r\n if ax is None:\r\n ax = plt.gca()\r\n\r\n # Set the axis's spines to be centered at the given point\r\n # (Setting all 4 spines so that the tick marks go in both directions)\r\n ax.spines['left'].set_position(('data', centerx))\r\n ax.spines['bottom'].set_position(('data', centery))\r\n ax.spines['right'].set_visible(False)\r\n ax.spines['top'].set_visible(False)\r\n ax.xaxis.set_ticks_position('bottom')\r\n ax.yaxis.set_ticks_position('left')\r\n ax.spines['left'].set_capstyle('butt')\r\n ax.spines['bottom'].set_capstyle('butt')\r\n\r\n\r\n\r\nif __name__ == '__main__':\r\n x = np.arange(-5, 5)\r\n y = x\r\n \r\n line, = plt.plot(x, y)\r\n center_spines()\r\n plt.axis('equal')\r\n plt.show()"
},
{
"alpha_fraction": 0.5056836605072021,
"alphanum_fraction": 0.518674910068512,
"avg_line_length": 32.380435943603516,
"blob_id": "b22d08a95310a44da7bc43077102ca35e025dda5",
"content_id": "4a48caf55fc470a9bf6bde40d574f657ecf725dc",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 3079,
"license_type": "no_license",
"max_line_length": 82,
"num_lines": 92,
"path": "/Spikes/spikedetekt2/spikedetekt2/core/tests/test_script.py",
"repo_name": "sapphire008/Python",
"src_encoding": "UTF-8",
"text": "\"\"\"Main module tests.\"\"\"\n\n# -----------------------------------------------------------------------------\n# Imports\n# -----------------------------------------------------------------------------\nimport os\nimport os.path as op\nimport numpy as np\nimport tempfile\n\nfrom kwiklib import (excerpts, get_params, pydict_to_python, get_filenames,\n itervalues, create_trace, Experiment)\nfrom spikedetekt2.core.script import run_spikedetekt\n\n\n# -----------------------------------------------------------------------------\n# Fixtures\n# -----------------------------------------------------------------------------\nDIRPATH = None\nprm_filename = 'myexperiment.prm'\nprb_filename = 'myprobe.prb'\ndat_filename = 'myexperiment.dat'\nname = 'myexperiment'\n\nsample_rate = 20000.\nduration = 1.\nnchannels = 8\nnsamples = int(sample_rate * duration)\n\n \ndef setup():\n global DIRPATH\n DIRPATH = tempfile.mkdtemp()\n \n # Create DAT file.\n raw_data = create_trace(nsamples, nchannels)\n for start, end in excerpts(nsamples, nexcerpts=10, excerpt_size=10):\n raw_data[start:end] += np.random.randint(low=-10000, high=10000, \n size=(10, nchannels))\n raw_data.tofile(op.join(DIRPATH, dat_filename))\n\n # Create PRM file.\n prm = get_params(**{\n 'raw_data_files': dat_filename,\n 'experiment_name': name,\n 'nchannels': nchannels,\n 'sample_rate': sample_rate,\n 'detect_spikes': 'positive',\n 'prb_file': prb_filename,\n })\n prm_contents = pydict_to_python(prm)\n with open(op.join(DIRPATH, prm_filename), 'w') as f:\n f.write(prm_contents)\n \n # Create PRB file.\n prb_contents = \"\"\"\n nchannels = %NCHANNELS%\n channel_groups = {0:\n {\n 'channels': list(range(nchannels)),\n 'graph': [(i, i + 1) for i in range(nchannels - 1)],\n }\n }\"\"\".replace('%NCHANNELS%', str(nchannels)).replace(' ', '')\n with open(op.join(DIRPATH, prb_filename), 'w') as f:\n f.write(prb_contents)\n\ndef teardown():\n os.remove(op.join(DIRPATH, prm_filename))\n os.remove(op.join(DIRPATH, prb_filename))\n \n files = get_filenames(name, dir=DIRPATH)\n [os.remove(path) for path in itervalues(files)]\n\n\n# -----------------------------------------------------------------------------\n# Main tests\n# -----------------------------------------------------------------------------\ndef test_main_1():\n run_spikedetekt(op.join(DIRPATH, prm_filename))\n \n # Open the data files.\n with Experiment(name, dir=DIRPATH) as exp:\n nspikes = len(exp.channel_groups[0].spikes)\n assert exp.channel_groups[0].spikes.clusters.main.shape[0] == nspikes\n assert exp.channel_groups[0].spikes.features_masks.shape[0] == nspikes\n assert exp.channel_groups[0].spikes.waveforms_filtered.shape[0] == nspikes\n \n fm = exp.channel_groups[0].spikes.features_masks\n assert fm[:,:,0].min() < fm[:,:,0].max()\n \n # Make sure the masks are not all null.\n assert fm[:,:,1].max() > 0\n "
},
{
"alpha_fraction": 0.5271473526954651,
"alphanum_fraction": 0.5558620691299438,
"avg_line_length": 42.28804397583008,
"blob_id": "d3a816cb6377e22a7b51f979ea1744d364712861",
"content_id": "3fcbd0657a4e066521b9722dc84005a0431b56d4",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 7975,
"license_type": "no_license",
"max_line_length": 138,
"num_lines": 184,
"path": "/generic/CNNReceptiveField.py",
"repo_name": "sapphire008/Python",
"src_encoding": "UTF-8",
"text": "# [filter size, stride, padding]\n#Assume the two dimensions are the same\n#Each kernel requires the following parameters:\n# - k_i: kernel size\n# - s_i: stride\n# - p_i: padding (if padding is uneven, right padding will higher than left padding; \"SAME\" option in tensorflow)\n#\n#Each layer i requires the following parameters to be fully represented:\n# - n_i: number of feature (data layer has n_1 = imagesize )\n# - j_i: distance (projected to image pixel distance) between center of two adjacent features\n# - r_i: receptive field of a feature in layer i\n# - start_i: position of the first feature's receptive field in layer i (idx start from 0, negative means the center fall into padding)\n\nimport numpy as np\nfrom pdb import set_trace\n\n\nlayerInfos = [] # global\n\n\ndef _outFromIn(conv, layerIn):\n n_in = layerIn[0]\n j_in = layerIn[1]\n r_in = layerIn[2]\n start_in = layerIn[3]\n k = conv[0]\n s = conv[1]\n p = conv[2]\n if isinstance(p, str):\n p = {'same':1, 'valid':0, 'causal':1}.get(p.lower())\n \n if s > 1-1E-9: # pooling\n n_out = np.floor((n_in - k + 2*p)/s)+1\n else: # upsampling\n n_out = np.ceil((n_in-k + 2*p+1)/s)\n actualP = (n_out-1)*s - n_in + k\n pL = np.ceil(actualP/2)\n pR = np.floor(actualP/2)\n\n j_out = j_in * s\n r_out = r_in + (k - 1)*j_in\n start_out = start_in + ((k-1)/2 - pL)*j_in\n return n_out, j_out, r_out, start_out\n\ndef _printLayer(layer, layer_name, conv):\n if len(conv) > 1:\n if \"conv\" in layer_name.lower(): #pooling\n print(layer_name + \": kernel={} stride={} padding={}\".format(*conv)) \n elif 'upsample' in layer_name.lower():\n print(layer_name + \": kernel={} size={:.0f} padding={}\".format(conv[0], 1/conv[1], conv[2])) \n else: #pool\n print(layer_name + \": kernel={} pool_size={} padding={}\".format(*conv))\n else: # input layer\n print(layer_name + \": \")\n print(\"\\t n features: {:.0f} \\n \\t jump: {:.0f} \\n \\t receptive size: {:.0f} \\t start: {:.1f}\".format(*layer))\n\ndef ReceptiveField_Conv2D(convnet, layer_names, imsize, calc_all=False, query_centers=False):\n \"\"\"\n Assume the two dimensions are the same\n Each kernel requires the following parameters:\n - k_i: kernel size\n - s_i: stride\n - p_i: padding (if padding is uneven, right padding will be higher than left padding;\n \"SAME\" option in tensorflow)\n\n Each layer i requires the following parameters to be fully represented:\n - n_i: number of feature (data layer has n_1 = imagesize )\n - j_i: distance (projected to image pixel distance) between center of two adjacent features\n - r_i: receptive field of a feature in layer i\n - start_i: position of the first feature's receptive field in layer i (idx start from 0, negative means the center fall into padding)\n\n * Inputs:\n - convnet: [[kernel_size, stride, padding], [], ...]\n - layer_names: list of names of the layers\n - imsize: image size, single integer\n - query_centers: whether or not get details on where the centers of the receptive fields are [False]\n - calc_all: When query_centers=True, whether or not calculate every single receptive field and \n its center for all neurons. Default False, which uses user prompts.\n \n * Outputs:\n Only when set calc_all = True. For each layer, returns a dictionary \n with the following fields (dictionary of dictionary)\n - centers: a n_i x n_i x 2 matrix of receptive field centers (pair of coordinates \n in the last dimension)\n - receptive: receptive field size\n - n: n_i, explained above\n - j: j_i, explained above\n - start: start_i, explained above\n\n For example:\n ```\n convnet = [[11,4,0],[3,2,0],[5,1,2],[3,2,0],[3,1,1],[3,1,1],[3,1,1],[3,2,0],[6,1,0], [1, 1, 0]]\n layer_names = ['conv1','pool1','conv2','pool2','conv3','conv4','conv5','pool5','fc6-conv', 'fc7-conv']\n imsize = 227\n layer_dict = ReceptiveField_Conv2D(convnet, layer_names, imsize, calc_all=True)\n ```\n \"\"\"\n #first layer is the data layer (image) with n_0 = image size; j_0 = 1; r_0 = 1; and start_0 = 0.5\n print (\"-------Net summary------\")\n currentLayer = [imsize, 1, 1, 0.5]\n\n _printLayer(currentLayer, \"input image\", [imsize])\n # Going through each layer and calcualte the infos\n for i in range(len(convnet)):\n currentLayer = _outFromIn(convnet[i], currentLayer)\n layerInfos.append(currentLayer)\n _printLayer(currentLayer, layer_names[i], convnet[i])\n print (\"------------------------\")\n \n # Calculate the receptive fields\n if not query_centers:\n return\n \n if not calc_all:\n layer_name = input(\"Layer name where the feature in: \") # prompt user\n layer_idx = layer_names.index(layer_name)\n idx_x = int(input(\"index of the feature in x dimension (from 0)\"))\n idx_y = int(input (\"index of the feature in y dimension (from 0)\"))\n \n n = layerInfos[layer_idx][0]\n j = layerInfos[layer_idx][1]\n r = layerInfos[layer_idx][2]\n start = layerInfos[layer_idx][3]\n assert(idx_x < n)\n assert(idx_y < n)\n \n print(\"\\n (x, y) = ({:.0f} , {:.0f})\".format(idx_x, idx_y))\n print (\"receptive field: ({}, {})\".format(r, r))\n print (\"center: ({:f}, {:f})\".format(start+idx_x*j, start+idx_y*j))\n else: # calcualte all of them\n layer_dict = {}\n for layer_name in layer_names:\n layer_dict[layer_name] = {}\n layer_idx = layer_names.index(layer_name)\n n = int(layerInfos[layer_idx][0])\n j = layerInfos[layer_idx][1]\n r = layerInfos[layer_idx][2]\n start = layerInfos[layer_idx][3]\n \n xx, yy = np.meshgrid(range(n), range(n), indexing='ij')\n centers = np.stack([xx, yy], axis=2)\n centers = start + centers * j\n \n #centers = np.empty((n, n, 2))\n #for idx_x in range(n):\n # for idx_y in range(n):\n # centers[idx_x, idx_y, :] = start+idx_x*j, start+idx_y*j\n \n layer_dict[layer_name]['centers'] = centers\n layer_dict[layer_name]['receptive'] = [r, r]\n layer_dict[layer_name]['n'] = n\n layer_dict[layer_name]['j'] = j\n layer_dict[layer_name]['start'] = start\n \n return layer_dict\n \nif __name__ == '__main__': \n \n #Conv:[kernel_size, stride=1, padding=1('same')]\n #Pool: [kernel_size (from Conv), pool_size, padding=1('same')]\n #UpSampling: [kernel_size, 1/size, padding=1('same')]\n layer_names = ['conv-1', 'pool-1', 'conv0','pool0', \n 'conv1','pool1','conv2','pool2',\n 'conv3', 'upsample3', 'conv4', 'upsample4', \n 'conv5', 'upsample5', 'conv6', 'upsample6', 'conv7']\n k = 3\n convnet = [[k, 1, 'same'], [k, 2, 'same'], [k, 1, 'same'], [k, 2, 'same'],\n [k, 1, 'same'], [k, 2, 'same'], [k, 1, 'same'], [k, 2, 'same'], \n [k, 1, 'same'], [k, 1/2, 1], [k, 1, 'same'], [k, 1/2, 'same'], \n [k, 1, 'same'], [k, 1/2, 1], [k, 1, 'same'], [k, 1/2, 1], [k, 1, 'same']]\n \n \n #layer_names = ['conv1','pool1','conv2','pool2','conv3', 'upsample3', 'conv4', 'upsample4', 'conv5']\n # = 3\n #convnet = [[k, 1, 'same'], [k, 2, 'same'], [k, 1, 'same'], [k, 2, 'same'], \n # [k, 1, 'same'], [k, 1/2, 1], [k, 1, 'same'], [k, 1/2, 'same'], \n # [k, 1, 'same']]\n \n imsize = 16384\n\n #convnet = [[11,4,0],[3,2,0],[5,1,2],[3,2,0],[3,1,1],[3,1,1],[3,1,1],[3,2,0],[6,1,0], [1, 1, 0]]\n #layer_names = ['conv1','pool1','conv2','pool2','conv3','conv4','conv5','pool5','fc6-conv', 'fc7-conv']\n #imsize = 227\n layer_dict = ReceptiveField_Conv2D(convnet, layer_names, imsize, calc_all=False)\n \n \n"
},
{
"alpha_fraction": 0.4882589876651764,
"alphanum_fraction": 0.5176308751106262,
"avg_line_length": 31.043533325195312,
"blob_id": "6005f24f7e0f06ee09f8332cdd18503190522612",
"content_id": "d5b1960dce2d8cdc4d85cb6999b45f43ccb5988e",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 51796,
"license_type": "no_license",
"max_line_length": 141,
"num_lines": 1562,
"path": "/generic/MATLAB.py",
"repo_name": "sapphire008/Python",
"src_encoding": "UTF-8",
"text": "# -*- coding: utf-8 -*-\r\n\"\"\"\r\nCreated on Sun Aug 30 01:41:55 2015\r\n\r\nPython implemented MATLAB utilities\r\n\r\n@author: Edward\r\n\"\"\"\r\nimport sys\r\nimport os\r\nimport re\r\nimport glob\r\nimport operator\r\nfrom pdb import set_trace\r\nfrom collections import OrderedDict, Iterable\r\n\r\nimport numpy as np\r\nimport pandas as pd\r\n\r\nimport scipy as sp\r\nfrom scipy import stats\r\nfrom scipy import sparse\r\nfrom skimage.draw import polygon\r\n\r\n\r\ndef getfield(struct, *args): # layered /serial indexing\r\n \"\"\"Get value from a field from a dictionary /structure\"\"\"\r\n try:\r\n for m in args:\r\n struct = struct[m]\r\n return(struct)\r\n except:\r\n return(None)\r\n\r\ndef ind2sub(ind, size, order='C'):\r\n \"\"\"MATLAB's ind2sub\r\n order: in 'C' order by default\"\"\"\r\n return(np.unravel_index(ind, size, order=order))\r\n\r\n\r\ndef sub2ind(sub, size, order='C'):\r\n \"\"\"MATLAB's sub2ind\r\n order: in 'C' order by default\"\"\"\r\n return(np.ravel_multi_index(sub, dims=size, order=order))\r\n\r\n\r\ndef getconsecutiveindex(t, N=1, interval=True):\r\n \"\"\"Given a sorted array of integers, find the start and the end of\r\n consecutive blocks\r\n E.g. t = [-1, 1,2,3,4,5, 7, 9,10,11,12,13, 15],\r\n return [1,5; 7,11]\r\n t: the sorted array of integers\r\n N: filter for at least N consecutive. Default 1\r\n interval: if True, we are filtering by N consecutive intervals instead of\r\n N consecutive numbers\r\n\r\n \"\"\"\r\n x = np.diff(t) == 1\r\n f = np.where(np.concatenate(([False], x))!=np.concatenate((x, [False])))[0]\r\n f = np.reshape(f, (-1, 2))\r\n # filter for at least N consecutvie\r\n f = f[(int(not(interval))+np.diff(f, n=1, axis=1).T[0]) >= N, :]\r\n return(f)\r\n\r\ndef consecutivenum2str(t, N=1):\r\n \"\"\"Given a sorted array of integers, return the shortened list\r\n E.g.\r\n E.g. t = [-1, 1,2,3,4,5, 7, 9,10,11,12,13, 15],\r\n return '-1, 1-5, 7, 9-13, 15'\r\n t: the sorted array of integers\r\n N: filter for at least N consecutive. Default 1\r\n \"\"\"\r\n f = getconsecutiveindex(t, N=N)\r\n # blocks\r\n b = [str(t[m[0]])+'-'+str(t[m[1]]) for m in f]\r\n # singles\r\n if N<2:\r\n s = np.array([], dtype=np.int32)\r\n for m in f:\r\n s = np.concatenate((s, np.arange(m[0], m[1]+1, dtype=np.int32)))\r\n s = np.setdiff1d(np.arange(0, len(t)), s)\r\n for n in s:\r\n b.append(str(t[n]))\r\n f = np.argsort(np.concatenate((f[:,0], s)))\r\n b = [b[k] for k in f]\r\n\r\n return(', '.join(b))\r\n\r\ndef str2numeric(lit):\r\n \"\"\"\"Handling only single numbers\"\"\"\r\n # Handle '0'\r\n if lit == '0': return 0\r\n # Handle 'inf'\r\n if lit.lower() == '-inf': return -np.inf\r\n if lit.lower() == 'inf': return np.inf\r\n # Handle nan\r\n if lit.lower() == 'nan': return np.nan\r\n # Handle None\r\n if lit.lower() == 'none': return None\r\n # Hex/Binary\r\n litneg = lit[1:] if lit[0] == '-' else lit\r\n if litneg[0] == '0':\r\n if litneg[1] in 'xX':\r\n return int(lit,16)\r\n elif litneg[1] in 'bB':\r\n return int(lit,2)\r\n else:\r\n try:\r\n return int(lit,8)\r\n except ValueError:\r\n pass\r\n\r\n # Int/Float/Complex\r\n try:\r\n return int(lit)\r\n except ValueError:\r\n pass\r\n try:\r\n return float(lit)\r\n except ValueError:\r\n pass\r\n try:\r\n complex(lit)\r\n except ValueError:\r\n raise(ValueError('String must contain only numerics'))\r\n\r\ndef str2num(lit, dlimiter=';'):\r\n \"\"\"MATLAB behavior of str2num.\r\n str2num('1') --> 1\r\n str2num('[5,3,2]') --> [5,3,2]\r\n str2num('[5,3,2;2,3,1]') --> [[5,3,2],[3,2,1]]\r\n \"\"\"\r\n # Separate the string by semicolon \";\" for each row\r\n lit = lit.split(dlimiter)\r\n # Identify all numbers\r\n lit = [re.findall(r\"[+-]?\\d+(?:\\.\\d+)?\", l) for l in lit]\r\n # Convert to a list of numbers\r\n lit = [[str2numeric(a) for a in l] for l in lit]\r\n lit = lit[0] if len(lit)==1 else lit # squeeze for vectors\r\n lit = lit[0] if len(lit)==1 else lit # squeeze again for singleton\r\n return(lit)\r\n\r\ndef rms(x):\r\n \"\"\"Root mean square of an array\"\"\"\r\n return(np.sqrt(np.mean(x**2)))\r\n\r\ndef findpeaks(x, mph=None, mpd=1, threshold=0, edge='rising',\r\n kpsh=False, valley=False):\r\n \"\"\"Detect peaks in data based on their amplitude and other features.\r\n Parameters\r\n ----------\r\n x : 1D array_like\r\n data.\r\n mph : {None, number}, optional (default = None)\r\n detect peaks that are greater than minimum peak height.\r\n mpd : positive integer, optional (default = 1)\r\n detect peaks that are at least separated by minimum peak distance (in\r\n number of data).\r\n threshold : positive number, optional (default = 0)\r\n detect peaks (valleys) that are greater (smaller) than `threshold`\r\n in relation to their immediate neighbors.\r\n edge : {None, 'rising', 'falling', 'both'}, optional (default = 'rising')\r\n for a flat peak, keep only the rising edge ('rising'), only the\r\n falling edge ('falling'), both edges ('both'), or don't detect a\r\n flat peak (None).\r\n kpsh : bool, optional (default = False)\r\n keep peaks with same height even if they are closer than `mpd`.\r\n valley : bool, optional (default = False)\r\n if True (1), detect valleys (local minima) instead of peaks.\r\n\r\n Returns\r\n -------\r\n ind : 1D array_like indices of the peaks in `x`.\r\n pks: height of detected peaks.\r\n Notes\r\n -----\r\n The detection of valleys instead of peaks is performed internally by simply\r\n negating the data: `ind_valleys = findpeaks(-x)`\r\n\r\n The function can handle NaN's\r\n See this IPython Notebook [1]_.\r\n References\r\n ----------\r\n .. [1] http://nbviewer.jupyter.org/github/demotu/BMC/blob/master/notebooks/DetectPeaks.ipynb\r\n Examples\r\n --------\r\n >>> from findpeaks import findpeaks\r\n >>> x = np.random.randn(100)\r\n >>> x[60:81] = np.nan\r\n >>> # detect all peaks and plot data\r\n >>> ind = findpeaks(x, show=True)\r\n >>> print(ind)\r\n >>> x = np.sin(2*np.pi*5*np.linspace(0, 1, 200)) + np.random.randn(200)/5\r\n >>> # set minimum peak height = 0 and minimum peak distance = 20\r\n >>> findpeaks(x, mph=0, mpd=20, show=True)\r\n >>> x = [0, 1, 0, 2, 0, 3, 0, 2, 0, 1, 0]\r\n >>> # set minimum peak distance = 2\r\n >>> findpeaks(x, mpd=2, show=True)\r\n >>> x = np.sin(2*np.pi*5*np.linspace(0, 1, 200)) + np.random.randn(200)/5\r\n >>> # detection of valleys instead of peaks\r\n >>> findpeaks(x, mph=0, mpd=20, valley=True, show=True)\r\n >>> x = [0, 1, 1, 0, 1, 1, 0]\r\n >>> # detect both edges\r\n >>> findpeaks(x, edge='both', show=True)\r\n >>> x = [-2, 1, -2, 2, 1, 1, 3, 0]\r\n >>> # set threshold = 2\r\n >>> findpeaks(x, threshold = 2, show=True)\r\n\r\n __author__ = \"Marcos Duarte, https://github.com/demotu/BMC\"\r\n __version__ = \"1.0.4\"\r\n __license__ = \"MIT\"\r\n \"\"\"\r\n x = np.atleast_1d(x).astype('float64')\r\n if x.size < 3:\r\n return np.array([], dtype=int), np.array([], dtype=float)\r\n if valley:\r\n x = -x\r\n # find indexes of all peaks\r\n dx = x[1:] - x[:-1]\r\n # handle NaN's\r\n indnan = np.where(np.isnan(x))[0]\r\n if indnan.size:\r\n x[indnan] = np.inf\r\n dx[np.where(np.isnan(dx))[0]] = np.inf\r\n ine, ire, ife = np.array([[], [], []], dtype=int)\r\n if not edge:\r\n ine = np.where((np.hstack((dx, 0)) < 0) & (np.hstack((0, dx)) > 0))[0]\r\n else:\r\n if edge.lower() in ['rising', 'both']:\r\n ire = np.where((np.hstack((dx, 0)) <= 0) & (np.hstack((0, dx)) > 0))[0]\r\n if edge.lower() in ['falling', 'both']:\r\n ife = np.where((np.hstack((dx, 0)) < 0) & (np.hstack((0, dx)) >= 0))[0]\r\n ind = np.unique(np.hstack((ine, ire, ife)))\r\n # handle NaN's\r\n if ind.size and indnan.size:\r\n # NaN's and values close to NaN's cannot be peaks\r\n ind = ind[np.in1d(ind, np.unique(np.hstack((indnan, indnan-1, indnan+1))), invert=True)]\r\n # first and last values of x cannot be peaks\r\n if ind.size and ind[0] == 0:\r\n ind = ind[1:]\r\n if ind.size and ind[-1] == x.size-1:\r\n ind = ind[:-1]\r\n # remove peaks < minimum peak height\r\n if ind.size and mph is not None:\r\n ind = ind[x[ind] >= mph]\r\n # remove peaks - neighbors < threshold\r\n if ind.size and threshold > 0:\r\n dx = np.min(np.vstack([x[ind]-x[ind-1], x[ind]-x[ind+1]]), axis=0)\r\n ind = np.delete(ind, np.where(dx < threshold)[0])\r\n # detect small peaks closer than minimum peak distance\r\n if ind.size and mpd > 1:\r\n ind = ind[np.argsort(x[ind])][::-1] # sort ind by peak height\r\n idel = np.zeros(ind.size, dtype=bool)\r\n for i in range(ind.size):\r\n if not idel[i]:\r\n # keep peaks with the same height if kpsh is True\r\n idel = idel | (ind >= ind[i] - mpd) & (ind <= ind[i] + mpd) \\\r\n & (x[ind[i]] > x[ind] if kpsh else True)\r\n idel[i] = 0 # Keep current peak\r\n # remove the small peaks and sort back the indexes by their occurrence\r\n ind = np.sort(ind[~idel])\r\n\r\n pks = np.array([x[p] for p in ind])\r\n\r\n return(ind, pks)\r\n\r\ndef nextpow2(n):\r\n m_f = np.log2(n)\r\n m_i = np.ceil(m_f)\r\n return(m_i)\r\n\r\ndef isempty(m, singleton=True):\r\n \"\"\"Return true if:\r\n a). an empty string\r\n b). a list of length zero\r\n c). a tuple of length zero\r\n d). a numpy array of length zero\r\n e). a singleton that is not None\r\n\r\n if singleton is true: treat np.ndarray as a single object, and return\r\n false only when the ndarray is an empty array\r\n \"\"\"\r\n if isinstance(m, (list, tuple, str)):\r\n if len(m) == 0:\r\n return True\r\n else:\r\n return all([not x for x in m])\r\n elif isinstance(m, np.ndarray):\r\n if len(m) == 0:\r\n return True\r\n else:\r\n if singleton:\r\n return False\r\n else: # matrix\r\n K = np.empty_like(m, dtype=np.bool)\r\n K_shape = np.shape(K)\r\n for k in range(np.size(K)):\r\n ijk = np.unravel_index(k, K_shape, order='C')\r\n try:\r\n K[ijk] = True if m[ijk].size==0 else False\r\n except:\r\n K[ijk] = False\r\n return K\r\n else:\r\n return False if m else True\r\n\r\ndef isnumber(obj):\r\n \"\"\"Determine if the object is a single number\"\"\"\r\n attrs = ['__add__', '__sub__', '__mul__', '__pow__', '__abs__'] # need to have\r\n attrs_neg = ['__len__'] # cannot have\r\n return (all(hasattr(obj, attr) for attr in attrs)) and (not any(hasattr(obj, attr) for attr in attrs_neg))\r\n\r\ndef isnumeric(obj):\r\n \"\"\"Check if an object is numeric, or that elements in a list of objects\r\n are numeric. Set all=True to return a signle boolean if all elements of the list\r\n is numeric\"\"\"\r\n # Allow application to iterables\r\n f_vec = np.frompyfunc(isnumber, 1, 1)\r\n tf = f_vec(obj)\r\n if isinstance(tf, np.ndarray):\r\n tf = tf.astype(dtype=bool)\r\n return tf\r\n\r\ndef isstrnum(obj):\r\n \"\"\"Check if a string can be converted into numeric\"\"\"\r\n try:\r\n str2numeric(obj)\r\n return True\r\n except:\r\n return False\r\n\r\ndef isrow(v):\r\n v = np.asarray(v)\r\n return True if len(v.shape)==1 else False\r\n\r\ndef iscol(v):\r\n v = np.asarray(v)\r\n return True if len(v.shape)==2 and v.shape[1] == 1 else False\r\n\r\ndef isiterable(x):\r\n return isinstance(x, Iterable) and not isinstance(x, (str, bytes))\r\n\r\ndef isvector(v):\r\n v = np.asarray(v)\r\n if len(v.shape)==0:\r\n return False\r\n elif len(v.shape)==1:\r\n return True\r\n else: # len(v.shape) > 1:\r\n if v.shape[0] == 1 or v.shape[1] == 1:\r\n return True\r\n else:\r\n return False\r\n\r\ndef ismatrix(v):\r\n v = np.asarray(v)\r\n shape = v.shape\r\n if len(shape) == 2:\r\n return True if all([s>1 for s in shape]) else False\r\n elif len(shape) > 2:\r\n return True if sum([s>1 for s in shape])>=2 else False\r\n else:\r\n return False\r\n\r\ndef listintersect(*args):\r\n \"\"\"Find common elements in lists\"\"\"\r\n args = [x for x in args if x is not None] # get rid of None\r\n def LINT(A,B): #short for list intersection\r\n return list(set(A) & set(B))\r\n if len(args) == 0:\r\n return(None)\r\n elif len(args) == 1:\r\n return(args[0])\r\n elif len(args) == 2:\r\n return(LINT(args[0],args[1]))\r\n else:\r\n newargs = tuple([LINT(args[0], args[1])]) + args[2:]\r\n return(listintersect(*newargs))\r\n\r\ndef padzeros(x):\r\n \"\"\"Pad zeros to make the array length 2^n for fft or filtering\r\n y, l = padzeros(x)\r\n\r\n x: input vector\r\n y: zero-padded vector\r\n l: length of the original array\r\n \"\"\"\r\n l = len(x)\r\n pad = 2**nextpow2(l)\r\n if (pad - l) < (0.1*l):\r\n pad = 2**(nextpow2(l)+1)\r\n pad = int(pad - l) # legnth of padding\r\n x = np.concatenate((x, np.zeros(pad)))\r\n return(x, l)\r\n\r\ndef longest_repeated_substring(lst, ignore_nonword=True, inall=True):\r\n \"\"\"given a list of strings, find common substrings. Example:\r\n ['Neocortex A', 'Neocortex B', 'Neocortex C'], aligning to the left, yields\r\n 'Neocortex'.\r\n\r\n * ignore_nonword: By default ignore non-word characters, and only look for\r\n characters in [a-zA-Z0-9_]. To include everything, set this to False.\r\n Can also specify a set of characters to remove in regular expression.\r\n\r\n * inall: does the longest string has to be in all strings in the list.\r\n True: longest string has to be in all strings. Default.\r\n False: at least in 2 strings\r\n Some integer N: at least in N string\r\n \"\"\"\r\n longest = None\r\n if isinstance(inall, bool):\r\n count = len(lst)-1 if inall else 1\r\n else:\r\n count = inall\r\n\r\n # Look for the word\r\n for word in lst:\r\n for i in range(len(word)):\r\n for j in range(i+1, len(word)+1):\r\n if ((longest is None or (j - i > len(longest))) and\r\n sum(word[i:j] in w for w in lst) > count):\r\n longest = word[i:j]\r\n\r\n # Remove non-word character depending on the option\r\n if ignore_nonword:\r\n if isinstance(ignore_nonword, bool):\r\n ignore_nonword = '[^a-zA-Z0-9_]'\r\n longest = re.sub(ignore_nonword, '', longest)\r\n\r\n return(longest)\r\n\r\ndef cell2array(C):\r\n \"\"\"Helpful when reading MATLAB .mat files containing cellarray\"\"\"\r\n n, m = C.shape\r\n K = np.zeros((n,m))\r\n K = K.tolist()\r\n for i in range(n):\r\n for j in range(m):\r\n tmp = C[i][j]\r\n if tmp.shape == (1,1):\r\n tmp = tmp[0][0]\r\n elif tmp.shape == (1,):\r\n tmp = tmp[0]\r\n elif tmp.shape[0] == 1 or tmp.shape[1] == 1:\r\n tmp = tmp.flatten()\r\n K[i][j] = tmp\r\n return K\r\n\r\ndef cell2df(C):\r\n \"\"\"Take a step further than cell2array to convert cell array just read from\r\n MATLAB's .mat file into a pandas DataFrame\r\n \"\"\"\r\n df = cell2array(C)\r\n df = pd.DataFrame(df[1:], columns=df[0])\r\n return df\r\n\r\n\r\ndef dict2df(mydict, colnames=None, addindex=False):\r\n \"\"\" Converting a dictionary into a Pandas data frame. Example:\r\n df = dict2df(mydict, colnames=['Drug', 'BifTime'], addindex=True)\r\n Converting\r\n mydict ={'ChR2': np.asarray([25, 725, 225, 175, 825, 1075, 825, 125, 325, 875, 325, 575, 1325, 725]),\r\n 'Terfenadine': np.asarray([725, 275, 175, 675, 525, 775]),\r\n 'XE991': np.asarray([175, 75, 75, 125, 125]),\r\n 'NS8593': np.asarray([25, 25, 25, 75, 75, 75, 75])}\r\n\r\n into a data frame:\r\n\r\n index Drug BifTime\r\n 0 ChR2 25\r\n 1 ChR2 725\r\n 2 ChR2 225\r\n ...........................\r\n 0 Terfnadine 725\r\n 1 Terfenadine 275\r\n ...........................\r\n ...........................\r\n 0 NS8593 25\r\n ...........................\r\n\r\n colnames: column names of [key, values]\r\n addindex: add a column called \"index\" as the first column\r\n\r\n \"\"\"\r\n df = pd.DataFrame.from_dict(mydict, orient='index').transpose()\r\n df['index'] = df.index\r\n df = pd.melt(df, id_vars=[\"index\"])\r\n if not addindex:\r\n df = df.drop([\"index\"], axis=1)\r\n # Get rid of NaNs\r\n try:\r\n df = df.loc[~np.isnan(df['value'])]\r\n except:\r\n pass\r\n df = df.reset_index(drop=True)\r\n if colnames is not None:\r\n df.columns = ([\"index\"] if addindex else [] )+ list(colnames)\r\n\r\n return df\r\n\r\ndef cell2list_b(C):\r\n \"\"\"From loaded MATLAB cell to Python's list\r\n This assumes each element of the table has only 1 elemtns.\r\n Legacy: use cell2array for more general purposes.\r\n \"\"\"\r\n n, m = C.shape\r\n K = np.zeros((n,m))\r\n K = K.tolist()\r\n for i in range(n):\r\n for j in range(m):\r\n tmp = C[i][j][0]\r\n if isinstance(tmp, (np.ndarray, list)):\r\n tmp = tmp[0]\r\n\r\n K[i][j] = tmp\r\n return K\r\n\r\ndef list2df(K):\r\n \"\"\"From Python's list to panadas' data frame\"\"\"\r\n headers = K[0]\r\n df = {}\r\n for n, h in enumerate(headers):\r\n df[h] = [c[n] for c in K[1:]]\r\n\r\n df = pd.DataFrame(data=df, columns=headers)\r\n\r\n return df\r\n\r\n\r\ndef remap_dict_data_type(mapping, func_key=str, func_val=lambda x: x):\r\n \"\"\"Change the data type of keys and values of dictionary\"\"\"\r\n return {func_key(k): func_val(v) for k,v in mapping.items()}\r\n\r\ndef sort_nicely( l ):\r\n \"\"\" Sort the given list of strings in the way that humans expect.\"\"\"\r\n convert = lambda text: int(text) if text.isdigit() else text\r\n alphanum_key = lambda key: [ convert(c) for c in re.split('([0-9]+)', key) ]\r\n l.sort( key=alphanum_key )\r\n return l\r\n\r\ndef sortrows(A, col=None):\r\n \"\"\"Return sorted A, and index, such that A_old[index] = A_new\"\"\"\r\n A = np.asarray(A)\r\n if not ismatrix(A):\r\n if isrow(A):\r\n return np.sort(A), np.argsort(A)\r\n else:\r\n return np.sort(A, axis=0), np.argsort(A, axis=0)\r\n\r\n # Sort the whole row\r\n if not col:\r\n col = list(range(A.shape[1]))\r\n\r\n nrows = A.shape[0]\r\n I = np.arange(nrows)[:, np.newaxis]\r\n A = np.concatenate((A, I), axis=1)\r\n A = np.asarray(sorted(A, key=operator.itemgetter(*col)))\r\n I = list(A[:, -1]) # get the index\r\n # convert to numeric if index in string\r\n for n, i in enumerate(I):\r\n if not isnumeric(i):\r\n I[n] = str2num(i)\r\n # I = I[:, np.newaxis]\r\n I = np.asarray(I)\r\n A = A[:, :-1]\r\n\r\n return A, I\r\n\r\ndef uniquerows(data, prec=5, sort=True):\r\n # d_r = np.fix(data * 10 ** prec) / 10 ** prec + 0.0\r\n if isinstance(data, (list, tuple)) or (isinstance(data, np.ndarray) and isrow(data)):\r\n data = np.asarray(data)[:, np.newaxis] # convert to a column vector\r\n b = np.ascontiguousarray(data).view(np.dtype((np.void, data.dtype.itemsize * data.shape[1])))\r\n _, ia = np.unique(b, return_index=True)\r\n _, ic = np.unique(b, return_inverse=True)\r\n c = np.unique(b).view(data.dtype).reshape(-1, data.shape[1])\r\n if not sort:\r\n ia, sorted_ia_index = sortrows(ia)\r\n c = c[sorted_ia_index,:]\r\n for n, k in enumerate(ic): # reindex\r\n ic[n] = int(np.where(sorted_ia_index == k)[0])\r\n return c, ia, ic\r\n\r\ndef poly2mask(r, c, m, n):\r\n \"\"\"m, n: canvas size that contains this polygon mask\"\"\"\r\n fill_row_coords, fill_col_coords = polygon(r, c, (m, n))\r\n mask = np.zeros((m, n), dtype=np.bool)\r\n mask[fill_row_coords, fill_col_coords] = True\r\n return mask\r\n\r\n\r\ndef midpoint(v):\r\n \"\"\"find the midpoints of a vector\"\"\"\r\n return v[:-1] + np.diff(v)/2.0\r\n\r\n\r\ndef SearchFiles(path, pattern, sortby='Name'):\r\n \"\"\"\r\n Search for files\r\n sortby: 'Name', 'Modified Date', 'Created Date', 'Size'\"\"\"\r\n P = glob.glob(os.path.join(path, pattern))\r\n\r\n if isempty(P):\r\n return P, []\r\n\r\n N = [[]] * len(P)\r\n M = [[]] * len(P)\r\n C = [[]] * len(P)\r\n B = [[]] * len(P)\r\n for n, p in enumerate(P):\r\n N[n] = os.path.basename(os.path.normpath(p))\r\n M[n] = os.path.getmtime(p)\r\n C[n] = os.path.getctime(p)\r\n B[n] = os.path.getsize(p)\r\n\r\n # Sort\r\n if sortby == 'Name':\r\n pass\r\n elif sortby == 'Modified Date':\r\n P, N = zip(*[(x, y) for (z, x, y) in sorted(zip(M, P, N))])\r\n elif sortby == 'Created Date':\r\n P, N = zip(*[(x, y) for (z, x, y) in sorted(zip(C, P, N))])\r\n elif sortby == 'Size':\r\n P, N = zip(*[(x, y) for (z, x, y) in sorted(zip(M, P, N))])\r\n\r\n return P, N\r\n\r\ndef regexprep(STRING, EXPRESSION, REPLACE, N=None):\r\n \"\"\"Function similar to MATLAB's regexprep, which allows replacement\r\n substitution of Nth occrrrence of character, which Python's re package\r\n lacks built-in. Note N's index start at 0 to be consistent with Python. An\r\n advantage of this design allows the user to specify negative index.\"\"\"\r\n if N is None: # simply wrap the re.sub function\r\n return re.sub(EXPRESSION, REPLACE, STRING)\r\n else:\r\n indices = []\r\n for m in re.finditer(EXPRESSION, STRING):\r\n indices.append((m.start(), m.end(), m.group(0))) # start, end, whole match\r\n STRING = STRING[0:indices[N][0]] + REPLACE + STRING[(indices[N][1]+1):]\r\n return STRING\r\n\r\n\r\nfrom bisect import bisect_left, insort\r\ndef medfilt1(x, N):\r\n \"\"\"scipy.signal.medfilt is simply too slow on large kernel size or large\r\n data. This is an alternative. Not necessarily the same as MATLAB's\r\n implementation, but definitely faster than scipy's implementation.\r\n x: data\r\n N: order, or width of moving median\r\n \"\"\"\r\n l = list(x[0].repeat(N))\r\n #l.sort() # not needed because all values are the same here\r\n mididx = (N - 1) // 2\r\n result = np.empty_like(x)\r\n for idx, new_elem in enumerate(x):\r\n old_elem = x[max(0, idx - N)]\r\n del l[bisect_left(l, old_elem)]\r\n insort(l, new_elem)\r\n result[idx] = l[mididx]\r\n\r\n return result\r\n\r\ndef goodness_of_fit(xdata, ydata, popt, pcov, f0):\r\n \"\"\"Calculate goodness of fit from curve_fit\r\n popt, pcov: returend by curve_fit\r\n f0: function used for fitting f(x, a, b, c, ...)\r\n p0: initial value used\r\n \"\"\"\r\n yfit = f0(xdata, *popt)\r\n SSE = np.sum((yfit - ydata)**2)\r\n RMSE = np.sqrt(SSE/len(yfit))\r\n SS_total = np.sum((ydata - np.mean(ydata))**2)\r\n R_sq = 1.0 - SSE / SS_total\r\n R_sq_adj = 1.0 - (SSE/(len(xdata)-1)) / (SS_total/(len(xdata)-len(popt)-1))# Adjusted R_sq\r\n # R_sq_adj = 1-(1-R_sq)*(len(xdata-1))/(len(xdata)-len(popt)-1)\r\n gof = {'SSE': SSE, 'RMSE': RMSE, 'SS_total':SS_total, 'rsquare': R_sq, 'adjrsquare': R_sq_adj}\r\n\r\n return gof\r\n\r\ndef compare_goodness_of_fit(popt1, pcov1, popt2, pcov2, num_data_points, param_name=None, index=None):\r\n \"\"\"Perform a t-test on a pair of fitted curves\"\"\"\r\n nvars = len(popt1)\r\n pcov1, pcov2 = np.sqrt(np.diag(pcov1)), np.sqrt(np.diag(pcov2))\r\n if index is not None:\r\n popt1, popt2 = popt1[index], popt2[index]\r\n pcov1, pcov2 = pcov1[index], popt2[index]\r\n\r\n\r\n T, df, P = [[]]*nvars, [[]]*nvars, [[]]*nvars\r\n for n, t1, v1, t2, v2 in enumerate(zip(popt1, pcov1, popt2, pcov2)):\r\n T[n]= (t1-t2) / np.sqrt(v1^2 + v2^2)\r\n df[n] = (num_data_points-nvars)*2\r\n P[n] = sp.stats.t.cdf(T, df=df)\r\n\r\n nvars = len(popt1) # update for later looping\r\n\r\n if param_name is None:\r\n param_name = ['param_{:d}'.format(d) for d in range(nvars)]\r\n\r\n for n in range(nvars):\r\n print(\"{}: T = {:.4f}, df = {:d}, p = {:.4f}\\n\".format(param_name[n], T[n], df[n], P[n]))\r\n\r\n return T, df, P\r\n\r\ndef confidence_interval(ydata, popt, pcov, alpha=0.05, parameter_names=None):\r\n dof = max(0, len(ydata) - len(popt))\r\n tval = stats.distributions.t.ppf(1.0-alpha/2., dof)\r\n ci_list= []\r\n if parameter_names is None:\r\n for n, (p, var) in enumerate(zip(popt, np.diag(pcov))):\r\n sigma = var**0.5\r\n ci_list.append({'name': 'p{:d}'.format(n), 'mean': p, 'lower': p - tval * sigma, 'upper': p + tval * sigma})\r\n else:\r\n for pname, p, var in zip(parameter_names, popt, np.diag(pcov)):\r\n sigma = var**0.5\r\n ci_list.append({'name': pname, 'mean': p, 'lower': p - tval * sigma, 'upper': p + tval * sigma})\r\n\r\n return ci_list\r\n\r\n\r\ndef calculate_aic(n, mse, num_params):\r\n \"\"\"calculate aic for regression\r\n * n: number of training examples\r\n * mse: mean squared error\r\n * num_params: number of parameters of the model\r\n \"\"\"\r\n aic = n * np.log(mse) + 2 * num_params\r\n return aic\r\n\r\ndef calculate_bic(n, mse, num_params):\r\n \"\"\"calculate bic for regression\r\n * n: number of training examples\r\n * mse: mean squared error\r\n * num_params: number of parameters of the model\r\n \"\"\"\r\n bic = n * np.log(mse) + num_params * np.log(n)\r\n return bic\r\n\r\ndef serr(X, axis=0, toarray=False, printerr=False, returnOnError=None, *args, **kwargs):\r\n try:\r\n if toarray:\r\n X = np.array(X)\r\n return np.std(X, axis=axis, *args, **kwargs) / np.sqrt(np.shape(X)[axis])\r\n except Exception as err:\r\n if printerr:\r\n print(err)\r\n if returnOnError is None:\r\n return None\r\n elif returnOnError == 'first':\r\n return np.array(X)[0]\r\n\r\ndef diffx(X, axis=0, printerr=False, *args, **kwargs):\r\n \"\"\"Wrapper function to deal with Panda's recent weird behavior\"\"\"\r\n def df_diff_apply(df):\r\n df_new = df.iloc[0, :]\r\n for c in df_new.index:\r\n if df[c].dtype.__str__() == 'object':\r\n pass\r\n else:\r\n df_new[c] = np.diff(df[c], axis=axis, *args, **kwargs)\r\n if len(df_new[c])==1:\r\n df_new[c] = np.asscalar(df_new[c])\r\n\r\n return df_new\r\n try:\r\n return df_diff_apply(X)\r\n except Exception as err:\r\n if True:\r\n print(err)\r\n print(X)\r\n return None\r\n\r\ndef detect_outliers(x, percentile=[75, 25], factor=1.58, return_index=False, return_threshold=False):\r\n \"\"\"\r\n Using R's boxplot.stats way of outlier detection for robustness\r\n Usages:\r\n * outliers = detect_outliers(x)\r\n * outliers, min_index, max_index = detect_outliers(x, return_index=True)\r\n * outliers, min_index, max_index = detect_outliers(x, return_threshold=True)\r\n * outliers, min_index, max_index, min_C, max_C = detect_outliers(x, return_index=True, return_threshold=True)\r\n \"\"\"\r\n x = x[~np.isnan(x)]\r\n qq = np.percentile(x, percentile)\r\n q75, q25 = max(qq), min(qq)\r\n iqr = q75 - q25\r\n C = iqr * factor / np.sqrt(len(x))\r\n min_C = q25-C # Threshold for lower bound\r\n max_C = q75+C # Threshold for upper bound\r\n min_index = np.where(x<min_C) # Index of the lower bound outliers\r\n max_index = np.where(x>max_C) # Index of the upper bound outliers\r\n return_list = list(np.concatenate((np.array(x[min_index]),np.array(x[max_index]))))\r\n if return_index or return_threshold:\r\n return_list = [return_list]\r\n if return_index:\r\n return_list = return_list + [min_index] + [max_index]\r\n if return_threshold:\r\n return_list = return_list + [min_C] + [max_C]\r\n return tuple(return_list)\r\n\r\ndef frequency_modulated_sine(f0, f, duration, ts, phase=0):\r\n \"\"\"Return the frequency modulated sinosoidal wave\r\n f0: starting frequency [Hz]\r\n f: ending frequency [Hz]\r\n duration: duration of the wave [Sec]\r\n ts: sampling rate of wave [sec]\r\n phase: phase at the start of the wave, between [0, pi]\r\n \"\"\"\r\n\r\n nu = np.linspace(f0, f, int(duration / ts) + 1)\r\n t = np.arange(0, duration+ts, ts)\r\n Y = np.sin(2 * np.pi * nu * t + phase)\r\n return t, Y\r\n\r\ndef softmax(X):\r\n exps = np.exp(X)\r\n return exps / np.sum(exps)\r\n\r\ndef printProgressBar (iteration, total, prefix = 'Progress', suffix = 'Complete', decimals = 1, length = 100, fill = '█', mode='percentage'):\r\n \"\"\"\r\n Call in a loop to create terminal progress bar\r\n @params:\r\n iteration - Required : current iteration (Int)\r\n total - Required : total iterations (Int)\r\n prefix - Optional : prefix string (Str)\r\n suffix - Optional : suffix string (Str)\r\n decimals - Optional : positive number of decimals in percent complete (Int)\r\n length - Optional : character length of bar (Int)\r\n fill - Optional : bar fill character (Str)\r\n mode - Optional : display mode, either \"percentage\" or \"counts\"\r\n\r\n # Sample Usage:\r\n from time import sleep\r\n # A List of Items\r\n items = list(range(0, 57))\r\n l = len(items)\r\n\r\n # Initial call to print 0% progress\r\n printProgressBar(0, l, prefix = 'Progress:', suffix = 'Complete', length = 50)\r\n for i, item in enumerate(items):\r\n # Do stuff...\r\n sleep(0.1)\r\n # Update Progress Bar\r\n printProgressBar(i + 1, l, prefix = 'Progress:', suffix = 'Complete', length = 50)\r\n\r\n # Sample Output\r\n Progress: |█████████████████████████████████████████████-----| 90.0% Complete\r\n Progress: |█████████████████████████████████████████████-----| 90/100 Complete\r\n \"\"\"\r\n filledLength = int(length * iteration // total)\r\n bar = fill * filledLength + '-' * (length - filledLength)\r\n if mode == \"percentage\":\r\n percent = (\"{0:.\" + str(decimals) + \"f}\").format(100 * (iteration / float(total)))\r\n print('\\r%s |%s| %s%% %s' % (prefix, bar, percent, suffix), end = '\\r')\r\n elif mode == \"counts\":\r\n print('\\r%s |%s| %d / %d %s' % (prefix, bar, iteration, total, suffix), end='\\r')\r\n # Print New Line on Complete\r\n if iteration >= total:\r\n print('\\nended')\r\n\r\ndef alpha(duration=400, amplitude=150, tau1=50, tau2=100, ts=0.1, force=\"positive\"):\r\n \"\"\"Returns a double exponential alpha function given parameters\"\"\"\r\n T = np.arange(0, duration+ts, ts)\r\n A = np.exp(-T/tau2) - np.exp(-T/tau1)\r\n if tau2==tau1:\r\n return np.zeros_like(T)\r\n elif force == \"positive\" and tau2<=tau1:\r\n # this will get us an alpha function with negative amp\r\n return np.zeros_like(T)\r\n elif force==\"negative\" and tau2>=tau1:\r\n # this will get us an alpha function with positive amp\r\n return np.zeros_like(T)\r\n\r\n A = A / np.max(np.abs(A)) * amplitude\r\n return A\r\n\r\ndef alphag(duration=400, amplitude=150, tau=100, ts=0.1):\r\n \"\"\"Returns a single exponential alpha function given parameters\"\"\"\r\n T = np.arange(0, duration+ts, ts)\r\n G = (T/tau) * np.exp(1 - T/tau)\r\n G = G / np.max(np.abs(G)) * amplitude\r\n return G\r\n\r\ndef get_alpha_duration(A, ts=0.1, thresh=0.95):\r\n \"\"\"Find duration of alpha function curve, when the value falls below a\r\n threshold fraction (e.g. 0.95 or about 3 taus) of the amplitude\"\"\"\r\n amplitude = np.max(A)\r\n amplitude_ind = np.argmax(A)\r\n index = np.where(A <= ((1.-thresh) * amplitude))[0]\r\n index = index[index > amplitude_ind]\r\n if not isempty(index):\r\n return index[0]*ts\r\n else:\r\n return None # beyond the scope of the curve. Cannot calculate\r\n\r\n\r\ndef fit_exp_with_offset(x, y, sort=False):\r\n \"\"\"\r\n Fitting y = a * exp( b * x) + c\r\n using non-iterative method based on\r\n Regressions et Equations Integrales by Jean Jacquelin\r\n https://math.stackexchange.com/questions/2318418/initial-guess-for-fitting-exponential-with-offset\r\n \"\"\"\r\n if sort:\r\n # Sorting (x, y) such that x is increasing\r\n X, _ = sortrows(np.c_[x, y], col=0)\r\n x, y = X[:, 0], X[:, 1]\r\n # Start algorithm\r\n S = np.zeros_like(x)\r\n S[1:] = 0.5 * (y[:-1] + y[1:]) * np.diff(x)\r\n S = np.cumsum(S)\r\n #for k in range(1, len(S)):\r\n # S[k] = S[k-1] + 1/2 * (y[k] + y[k-1]) * (x[k] - x[k-1])\r\n\r\n M = np.empty((2, 2))\r\n N = np.empty((2, 1))\r\n\r\n # Getting b\r\n M[0, 0] = np.sum((x - x[0])**2)\r\n M[0, 1] = np.sum((x - x[0]) * S)\r\n M[1, 0] = M[0, 1]\r\n M[1, 1] = np.sum(S**2)\r\n\r\n\r\n N[0, 0] = np.sum((y - y[0]) * (x - x[0]))\r\n N[1, 0] = np.sum((y - y[0]) * S)\r\n\r\n B = np.matmul(np.linalg.inv(M), N)\r\n b = B[1, 0]\r\n\r\n # Getting a and c\r\n M[0, 0] = len(x)\r\n M[0, 1] = np.sum(np.exp(b*x))\r\n M[1, 0] = M[0, 1]\r\n M[1, 1] = np.sum(np.exp(2*b*x))\r\n\r\n N[0, 0] = np.sum(y)\r\n N[1, 0] = np.sum(y * np.exp(b*x))\r\n AC = np.matmul(np.linalg.inv(M), N)\r\n c = AC[0, 0]\r\n a = AC[1, 0]\r\n\r\n return a, b, c\r\n\r\ndef fit_double_exp(x, y, sort=False):\r\n \"\"\"\r\n Fitting y = b * exp(p * x) + c * exp(q * x)\r\n Implemented based on:\r\n Regressions et Equations Integrales by Jean Jacquelin\r\n \"\"\"\r\n if sort:\r\n # Sorting (x, y) such that x is increasing\r\n X, _ = sortrows(np.c_[x, y], col=0)\r\n x, y = X[:, 0], X[:, 1]\r\n # Start algorithm\r\n n = len(x)\r\n S = np.zeros_like(x)\r\n S[1:] = 0.5 * (y[:-1] + y[1:]) * np.diff(x)\r\n S = np.cumsum(S)\r\n SS = np.zeros_like(x)\r\n SS[1:] = 0.5 * (S[:-1] + S[1:]) * np.diff(x)\r\n SS = np.cumsum(SS)\r\n\r\n # Getting the parameters\r\n M = np.empty((4, 4))\r\n N = np.empty((4, 1))\r\n\r\n M[:, 0] = np.array([np.sum(SS**2), np.sum(SS * S), np.sum(SS * x), np.sum(SS)])\r\n\r\n M[0, 1] = M[1, 0]\r\n M[1:,1] = np.array([np.sum(S**2), np.sum(S * x), np.sum(S)])\r\n\r\n M[:2,2] = M[2, :2]\r\n M[2, 2] = np.sum(x**2)\r\n\r\n M[:3,3] = M[3,:3]\r\n M[3, 3] = n\r\n\r\n N[:, 0] = np.array([np.sum(SS * y), np.sum(S * y), np.sum(x * y), np.sum(y)])\r\n\r\n # Regression for p and q\r\n ABCD = np.matmul(np.linalg.inv(M), N)\r\n #set_trace()\r\n A, B, C, D = ABCD.flatten()\r\n p = 0.5 * (B + np.sqrt(B**2 + 4 * A))\r\n q = 0.5 * (B - np.sqrt(B**2 + 4 * A))\r\n\r\n # Regression for b, c\r\n I = np.empty((2, 2))\r\n J = np.empty((2, 1))\r\n\r\n beta = np.exp(p * x)\r\n eta = np.exp(q * x)\r\n I[0, 0] = np.sum(beta**2)\r\n I[1, 0] = np.sum(beta * eta)\r\n I[0, 1] = I[1, 0]\r\n I[1, 1] = np.sum(eta**2)\r\n\r\n\r\n J[:, 0] = [np.sum(y * beta), np.sum(y * eta)]\r\n\r\n bc = np.matmul(np.linalg.inv(I), J)\r\n b, c = bc.flatten()\r\n\r\n return b, c, p, q\r\n\r\n\r\ndef fit_double_exp_with_offset(x, y, sort=False):\r\n \"\"\"\r\n Fitting y = a + b * exp(p * x) + c * exp(q * x)\r\n Implemented based on:\r\n https://math.stackexchange.com/questions/2249200/exponential-regression-with-two-terms-and-constraints\r\n \"\"\"\r\n if sort:\r\n # Sorting (x, y) such that x is increasing\r\n X, _ = sortrows(np.c_[x, y], col=0)\r\n x, y = X[:, 0], X[:, 1]\r\n # Start algorithm\r\n n = len(x)\r\n S = np.zeros_like(x)\r\n S[1:] = 0.5 * (y[:-1] + y[1:]) * np.diff(x)\r\n S = np.cumsum(S)\r\n SS = np.zeros_like(x)\r\n SS[1:] = 0.5 * (S[:-1] + S[1:]) * np.diff(x)\r\n SS = np.cumsum(SS)\r\n\r\n # Getting the parameters\r\n M = np.empty((5, 5))\r\n N = np.empty((5, 1))\r\n\r\n M[:, 0] = np.array([np.sum(SS**2), np.sum(SS * S), np.sum(SS * x**2), np.sum(SS * x), np.sum(SS)])\r\n\r\n M[0, 1] = M[1, 0]\r\n M[1:,1] = np.array([np.sum(S**2), np.sum(S * x**2), np.sum(S * x), np.sum(S)])\r\n\r\n M[0, 2] = M[2, 0]\r\n M[1, 2] = M[2, 1]\r\n M[2:,2] = np.array([np.sum(x**4), np.sum(x**3), np.sum(x**2)])\r\n\r\n M[:3,3] = M[3,:3]\r\n M[3, 3] = M[4, 2]\r\n M[4, 3] = np.sum(x)\r\n\r\n M[:4, 4] = M[4, :4]\r\n M[4, 4] = n\r\n\r\n N[:, 0] = np.array([np.sum(SS * y), np.sum(S * y), np.sum(x**2 * y), np.sum(x * y), np.sum(y)])\r\n\r\n # Regression for p and q\r\n ABCDE = np.matmul(np.linalg.inv(M), N)\r\n A, B, C, D, E = ABCDE.flatten()\r\n p = 0.5 * (B + np.sqrt(B**2 + 4 * A))\r\n q = 0.5 * (B - np.sqrt(B**2 + 4 * A))\r\n\r\n # Regression for a, b, c\r\n I = np.empty((3, 3))\r\n J = np.empty((3, 1))\r\n\r\n I[0, 0] = n\r\n I[1, 0] = np.sum(np.exp(p * x))\r\n I[2, 0] = np.sum(np.exp(q * x))\r\n I[0, 1] = I[1, 0]\r\n I[1, 1] = np.sum(I[1, 0]**2)\r\n I[2, 1] = np.sum(I[1, 0] * I[2, 0])\r\n I[0, 2] = I[2, 0]\r\n I[1, 2] = I[2, 1]\r\n I[2, 2] = np.sum(I[2, 0]**2)\r\n\r\n J[:, 0] = [np.sum(y), np.sum(y * I[1, 0]), np.sum(y * I[2, 0])]\r\n\r\n abc = np.matmul(np.linalg.inv(I), J)\r\n a, b, c = abc.flatten()\r\n\r\n return a, b, c, p, q\r\n\r\n\r\ndef fit_gaussian_non_iter(x, y, sort=False):\r\n \"\"\"\r\n Fitting Gaussian y = 1 / (sigma * sqrt(2 * pi)) * exp( -1/2 * ( (x - mu) / sigma )^2 )\r\n using non-iterative method based on\r\n Regressions et Equations Integrales by Jean Jacquelin\r\n \"\"\"\r\n if sort:\r\n # Sorting (x, y) such that x is increasing\r\n X, _ = sortrows(np.c_[x, y], col=0)\r\n x, y = X[:, 0], X[:, 1]\r\n # Start algorithm\r\n S = np.zeros_like(x)\r\n S[1:] = 0.5 * (y[:-1] + y[1:]) * np.diff(x)\r\n S = np.cumsum(S)\r\n T = np.zeros_like(x)\r\n x_y = x * y\r\n T[1:] = 0.5 * ( x_y[:-1] + x_y[1:] ) * np.diff(x)\r\n T = np.cumsum(T)\r\n\r\n # S1 = np.zeros_like(x)\r\n # T1 = np.zeros_like(x)\r\n # for k in range(1, len(S1)):\r\n # S1[k] = S1[k-1] + 1/2 * (y[k] + y[k-1]) * (x[k] - x[k-1])\r\n # T1[k] = T1[k-1] + 1/2 * (y[k]*x[k] + y[k-1]*x[k-1]) * (x[k] - x[k-1])\r\n\r\n M = np.empty((2, 2))\r\n N = np.empty((2, 1))\r\n\r\n # Getting the parameters\r\n M[0, 0] = np.sum(S**2)\r\n M[0, 1] = np.sum(S * T)\r\n M[1, 0] = M[0, 1]\r\n M[1, 1] = np.sum(T**2)\r\n\r\n N[0, 0] = np.sum((y - y[0]) * S)\r\n N[1, 0] = np.sum((y - y[0]) * T)\r\n AB = np.matmul(np.linalg.inv(M), N)\r\n A = AB[0, 0]\r\n B = AB[1, 0]\r\n\r\n mu = - A / B\r\n\r\n sigma = np.sqrt(-1 / B)\r\n\r\n return mu, sigma\r\n\r\ndef pairwise_diff(A, B):\r\n \"\"\" Calculate the pair wise difference between each element of the\r\n two vectors of length N and M, respectively, and return a matrix of shape\r\n M x N\r\n * A: vector of length M\r\n * B: vector of length N\r\n \"\"\"\r\n A = np.asarray(A).flatten()[:, np.newaxis]\r\n B = np.asarray(B).flatten()[np.newaxis,:]\r\n return A-B\r\n\r\ndef gaussian_kernel(ts, sigma=300., n=5, normalize=False):\r\n \"\"\"Make gaussian kernel centered at 0\r\n ts: sampling rate [ms]\r\n n: use n standard deviations below and above 0 (mean).\r\n sigma: standard deviation (width of Gaussian kernel) [ms].\r\n During Up state, sigma = 10ms according to:\r\n Neske, G.T., Patrick, S.L., Connor, B.W. Contributions of\r\n Diverse Excitatory and Inhibitory Neurons to Recurrent Network\r\n Activity in Cerebral Cortex. The Journal of Nueroscience.\r\n 35(3): 1089-1105 (2015). But this sd size may be too small for\r\n other processes. So default is set to 300ms for a smoother\r\n firing rate curve.\r\n normalize: Default False\r\n - 'area': normalize the area under the curve to be 1\r\n - 'peak': normalize the peak of curve to be 1\r\n \"\"\"\r\n t = np.arange(-n*sigma, n*sigma+ts, ts)\r\n w = 1./(np.sqrt(2.*np.pi)*sigma)*np.exp(-t**2/(2.*sigma**2))\r\n if normalize:\r\n if normalize == 'peak':\r\n w = w / np.max(w)\r\n elif normalize == 'area':\r\n w = w / np.sum(w)\r\n else:\r\n pass\r\n return(t, w)\r\n\r\ndef exists(obj):\r\n \"\"\"Check if a variable exists\"\"\"\r\n if obj in locals() or obj in globals():\r\n return True\r\n else:\r\n return False\r\n\r\ndef spm_matrix_2d(P, order='T*R*Z*S'):\r\n \"\"\"\r\n returns an affine transformation matrix\r\n\r\n Arguments:\r\n * P[0] - x translation\r\n * P[1] - y translation\r\n * P[2] - rotation (radians)\r\n * P[3] - x scaling\r\n * P[4] - y scaling\r\n * P[5] - affine\r\n * order (optional): application order of transformations.\r\n\r\n Returns:\r\n * A: affine transformation matrix\r\n\r\n To transform X = [x;y] coordinate, pad 1 at the end,\r\n i.e. X = [x; y; 1], then Y = A * X, where X, Y are 3 x n matrices\r\n \"\"\"\r\n # Pad matrix in case only a subset is being specified\r\n q = np.array([0, 0, 0, 1, 1, 0])\r\n P = np.concatenate((P, q[(len(P)):]))\r\n T = np.array([\r\n [1, 0, P[0]],\r\n [0, 1, P[1]],\r\n [0, 0, 1]\r\n ])\r\n\r\n R = np.array([\r\n [np.cos(P[2]), np.sin(P[2]), 0],\r\n [-np.sin(P[2]), np.cos(P[2]), 0],\r\n [0, 0, 1]\r\n ])\r\n\r\n Z = np.array([\r\n [P[3], 0, 0],\r\n [0, P[4], 0],\r\n [0, 0, 1]\r\n ])\r\n\r\n S = np.array([\r\n [1, P[5], 0],\r\n [0 , 1, 0],\r\n [0, 0, 1]\r\n ])\r\n\r\n order = order.replace('*', '@')\r\n A = eval(order)\r\n # Sanity check\r\n if not isnumeric(A).all() or A.ndim != 2 or any(np.array(A.shape) != 3):\r\n raise(ValueError('Order expression \"{}\" did not return a valid 3x3 matrix.'.format(order)))\r\n\r\n return A\r\n\r\ndef spm_matrix(P, order='T*R*Z*S'):\r\n \"\"\"\r\n returns an affine transformation matrix\r\n\r\n Arguments:\r\n * P[0] - x translation\r\n * P[1] - y translation\r\n * P[2] - z translation\r\n * P[3] - x rotation about - {pitch} (radians)\r\n * P[4] - y rotation about - {roll} (radians)\r\n * P[5] - z rotation about - {yaw} (radians)\r\n * P[6] - x scaling\r\n * P[7] - y scaling\r\n * P[8] - z scaling\r\n * P[9] - x affine\r\n * P[10] - y affine\r\n * P[11] - z affine\r\n * order (optional) application order of transformations.\r\n\r\n Returns:\r\n * A: affine transformation matrix\r\n\r\n To transform X = [x;y;z] coordinate, pad 1 at the end,\r\n i.e. X = [x; y; z; 1], then Y = A * X, where X, Y are 4xn matrices\r\n \"\"\"\r\n # Pad matrix in case only a subset is being specified\r\n q = np.array([0, 0, 0, 0, 0, 0, 1, 1, 1, 0, 0, 0])\r\n P = np.concatenate((P, q[(len(P)):]))\r\n # Specify the matrices\r\n T = np.array([\r\n [1, 0, 0, P[0]],\r\n [0, 1, 0, P[1]],\r\n [0, 0, 1, P[2]],\r\n [0, 0, 0, 1 ]\r\n ])\r\n R1 = np.array([\r\n [1, 0, 0, 0],\r\n [0, np.cos(P[3]), np.sin(P[3]), 0],\r\n [0, -np.sin(P[3]), np.cos(P[3]), 0],\r\n [0, 0, 0, 1]\r\n ])\r\n\r\n R2 = np.array([\r\n [np.cos(P[4]), 0, np.sin(P[4]), 0],\r\n [0, 1, 0, 0],\r\n [-np.sin(P[4]), 0, np.cos(P[4]), 0],\r\n [0, 0, 0, 1]\r\n ])\r\n\r\n R3 = np.array([\r\n [np.cos(P[5]), np.sin(P[5]), 0, 0],\r\n [-np.sin(P[5]), np.cos(P[5]), 0, 0],\r\n [0, 0, 1, 0],\r\n [0, 0, 0, 1]\r\n ])\r\n R = R1 @ R2 @ R3\r\n\r\n Z = np.array([\r\n [P[6], 0, 0, 0],\r\n [0, P[7], 0, 0],\r\n [0, 0, P[8], 0],\r\n [0, 0, 0, 1]\r\n ])\r\n\r\n S = np.array([\r\n [1, P[9], P[10], 0],\r\n [0, 1, P[11], 0],\r\n [0, 0, 1, 0],\r\n [0, 0, 0, 1]\r\n ])\r\n\r\n order = order.replace('*', '@')\r\n A = eval(order)\r\n # Sanity check\r\n if not isnumeric(A).all() or A.ndim != 2 or any(np.array(A.shape) != 4):\r\n raise(ValueError('Order expression \"{}\" did not return a valid 4x4 matrix.'.format(order)))\r\n\r\n return A\r\n\r\n\r\ndef ks_test_survival(s1, s2, n, m, alpha=0.05, alpha_type=1):\r\n \"\"\"\r\n Kolmogorov-Smirnov test on 2 survival curves\r\n \"\"\"\r\n D = max(abs(s1 - s2))\r\n en = np.sqrt((n+m)/(n*m))\r\n c_alpha = np.sqrt(-1/2*np.log(alpha))\r\n D_critical = c_alpha * en\r\n if alpha_type==1:\r\n p = np.exp((D / en)**2 * (-2))\r\n else:\r\n p = stats.distributions.kstwobign.sf((1/en + 0.12+0.11 * en) * D)\r\n\r\n return D, p, D_critical\r\n\r\ndef faster_corr(X, Y):\r\n \"\"\"\r\n faster way to compute Pearson's correlation\r\n between corresponding columns of two matrices\r\n http://stackoverflow.com/questions/9262933/what-is-a-fast-way-to-compute-column-by-column-correlation-in-matlab\r\n \"\"\"\r\n X = X - np.mean(X, axis=0, keepdims=True) # zero mean\r\n Y = Y - np.mean(Y, axis=0, keepdims=True) # zero mean\r\n X = X / np.sqrt(np.sum(X**2, axis=0, keepdims=True)) # L2 normalization\r\n Y = Y / np.sqrt(np.sum(Y**2, axis=0, keepdims=True)) # L2 normalization\r\n R = np.sum(X*Y, axis=0)\r\n\r\n return R\r\n\r\ndef faster_cross_corr(X, Y):\r\n \"\"\"\r\n faster way to compute Pearson's correlation\r\n between each combination of paris of two matrices\r\n returns a cross correlation matrix\r\n [[x1:y1, x1:y2, x1:y3,...., x1:yn],\r\n [x2:y1, x2:y2, x2:y3,...., x1:yn],\r\n [...]\r\n [xn:y1, xn:y2, xn:y3,...., xn:yn]\r\n \"\"\"\r\n X = X - np.mean(X, axis=0, keepdims=True) # zero mean\r\n Y = Y - np.mean(Y, axis=0, keepdims=True) # zero mean\r\n X = X / np.sqrt(np.sum(X**2, axis=0, keepdims=True)) # L2 normalization\r\n Y = Y / np.sqrt(np.sum(Y**2, axis=0, keepdims=True)) # L2 normalization\r\n R = X.T.dot(Y)\r\n\r\n return R\r\n\r\n\r\ndef r2z(R, return_q=False):\r\n \"\"\"\r\n Convert Pearson correlation to Z score and get a P value\r\n\r\n z = arctanh(R)\r\n p = Gaussian(mu=0, sigma=1)(z)\r\n q = NormCDF(mu=0, sigma=1)(z)\r\n = (1. + erf(z / sqrt(2.))) / 2.\r\n where erf = 2/sqrt(pi)*integral(exp(-t**2), t=0..z).\r\n\r\n Parameters\r\n ----------\r\n R : Pearson correlation between -1 and 1\r\n return_q: if return cumulative probability as well\r\n\r\n Returns\r\n -------\r\n z: z score\r\n p: p value\r\n q: cumulative distribution of the z score (only returned if return_q=True)\r\n \"\"\"\r\n assert R <=1 and R>=-1\r\n\r\n z = np.arctanh(R)\r\n p = (1/np.sqrt(2*np.pi)) * np.exp(-z**2/2) # Gaussian N(0, 1)\r\n if return_q:\r\n q = stats.norm.cdf(z)\r\n return z, p, q\r\n else:\r\n return z, p\r\n\r\ndef sparse_memory_usage(mat):\r\n \"\"\"\r\n Based on https://stackoverflow.com/questions/43681279/why-is-scipy-sparse-matrix-memory-usage-indifferent-of-the-number-of-elements-in\r\n \"\"\"\r\n if mat.format in ('csr', 'csc'):\r\n return mat.data.nbytes + mat.indptr.nbytes + mat.indices.nbytes\r\n elif mat.format == 'coo':\r\n return mat.data.nbytes + mat.row.nbytes + mat.col.nbytes\r\n elif mat.format == 'lil':\r\n return mat.data.nbytes + mat.rows.nbytes\r\n else:\r\n return sys.getsizeof(mat)\r\n\r\ndef cosine_similarities(mat):\r\n \"\"\"\r\n Compute pairiwise cosine similarities between columns\r\n given sparse matrix mat\r\n \"\"\"\r\n import sklearn.preprocessing as pp\r\n col_normed_mat = pp.normalize(mat.tocsc(), axis=0)\r\n return col_normed_mat.T * col_normed_mat\r\n\r\ndef cosine_similarities_ab(A, B):\r\n \"\"\"\r\n Compute cosine similarities between matrix A and B.\r\n\r\n Parameters\r\n ----------\r\n A : np.ndarray\r\n Left matrix of shape (M x K).\r\n B : np.ndarray\r\n Right matrix of shape (N x K).\r\n\r\n Returns\r\n -------\r\n Cosine similarity matrix of shape (M x N).\r\n\r\n \"\"\"\r\n # L2 normalize\r\n A = A / np.sqrt(np.sum(A**2, axis=1, keepdims=True))\r\n B = B / np.sqrt(np.sum(B**2, axis=1, keepdims=True))\r\n return A @ B.T\r\n\r\ndef jaccard_similarities(mat):\r\n \"\"\"\r\n Compute pairwise Jaccard similarities between columns\r\n given sparse matrix mat\r\n \"\"\"\r\n cols_sum = mat.getnnz(axis=0)\r\n ab = mat.T * mat\r\n\r\n # for rows\r\n aa = np.repeat(cols_sum, ab.getnnz(axis=0))\r\n # for columns\r\n bb = cols_sum[ab.indices]\r\n\r\n similarities = ab.copy()\r\n similarities.data /= (aa + bb - ab.data)\r\n\r\n return similarities\r\n\r\n\r\ndef kl_divergence_pairwise(P):\r\n \"\"\"\r\n Compute pairwise KL divergence in P, where\r\n each row of P is a probability distribution.\r\n \r\n Using formula:\r\n D_KL(P, Q) = H(P, Q) - H(P) \r\n = (-\\sum_i p_i \\log q_i) - (-\\sum p_i \\log p_i)\r\n \r\n Returns an N x N matrix where N is the number of rows\r\n \"\"\"\r\n H_p = -np.sum(P * np.log(P), axis=1, keepdims=True)\r\n H_p_q = -(P @ np.log(P.T))\r\n D_kl = H_p_q - H_p\r\n return D_kl\r\n\r\n\r\ndef harmonic_approx(n):\r\n \"\"\"Returns an approximate value of n-th harmonic number.\r\n\r\n http://en.wikipedia.org/wiki/Harmonic_number\r\n \"\"\"\r\n if n == 0:\r\n return 0\r\n # Euler-Mascheroni constant\r\n gamma = 0.57721566490153286060651209008240243104215933593992\r\n return gamma + np.log(n) + 0.5/n - 1./(12*n**2) + 1./(120*n**4)\r\n\r\n\r\ndef cubic_trig(b, c, d):\r\n \"\"\"\r\n Trigonometric solution to a cubic function, assuming the form\r\n x^3 + b x^2 + c x + d = 0\r\n \r\n The coefficients can be of any shape (n1, n2, n3, ...), \r\n and the solutions will be an output of dimension (n1, n2, n3, ..., 3)\r\n where the 3 solutions (both complex and real) are stacked in the \r\n last dimension\r\n \r\n Reference: \"A Generalized Trigonometric Solution of the Cubic Equation\"\r\n https://www.jstor.org/stable/2968419?seq=1#metadata_info_tab_contents\r\n \"\"\"\r\n # Transform to depressed form\r\n b_sq = b * b\r\n b_cb = b_sq * b\r\n b_3 = b/3\r\n p = c-b_sq/3\r\n q = 2/27*b_cb-1/3*b*c+d\r\n \r\n # Solve with complex trigonometric functions\r\n phi = 1/3*np.arcsin((3*q/(-2*p) * np.lib.scimath.sqrt(3/(-p))).astype(np.complex))\r\n f = -2 * np.sign(p) * np.lib.scimath.sqrt(-p/3)\r\n x1 = f * np.sin(phi) - b_3\r\n x2 = f * np.sin(phi + 2*np.pi/3) - b_3\r\n x3 = f * np.sin(phi + 4*np.pi/3) - b_3\r\n\r\n X = np.dstack([x1, x2, x3])\r\n return X\r\n\r\n\r\ndef dict_list_to_list_dict(DL):\r\n\t\"\"\"\r\n\tDict of list to list of dict\r\n\tDL = {\r\n\t\t\"a\": [5, 6, 7],\r\n\t\t\"b\": [7, 6, 5],\r\n\t\t\"c\": [2, 3, 4],\r\n\t}\r\n\t->\r\n\tLD = [\r\n\t\t{\"a\": 5, \"b\": 7, \"c\": 2},\r\n\t\t{\"a\": 6, \"b\": 6, \"c\": 3},\r\n\t\t{\"a\": 7, \"b\": 5, \"c\": 4},\r\n\t]\r\n\t\"\"\"\r\n\tv = [dict(zip(DL,t)) for t in zip(*DL.values())]\r\n\treturn v\r\n\r\n\r\n\r\ndef list_dict_to_dict_list(LD):\r\n\t\"\"\"\r\n\tConvert list of dicts to dict of list\r\n\tLD = [\r\n\t\t{\"a\": 5, \"b\": 7, \"c\": 2},\r\n\t\t{\"a\": 6, \"b\": 6, \"c\": 3},\r\n\t\t{\"a\": 7, \"b\": 5, \"c\": 4},\r\n\t]\r\n\t->\r\n\tDL = {\r\n\t\t\"a\": [5, 6, 7],\r\n\t\t\"b\": [7, 6, 5],\r\n\t\t\"c\": [2, 3, 4],\r\n\t}\r\n\t\"\"\"\r\n\tv = {k: [dic[k] for dic in LD] for k in LD[0]}\r\n\treturn v\r\n\r\n\r\nif __name__ == '__main__':\r\n import matplotlib.pyplot as plt\r\n # A = np.array([[2, 3], [1,2], [1, 2], [3, 2], [4,5], [3,1], [1,2], [2,3]])\r\n # A = ['a','b','a','c','a','b','c']\r\n # C, IA, IC = uniquerows(A)\r\n x = np.arange(0, 10, 0.1)\r\n #f0 = lambda x, a, b, c: a * np.exp(b*x)+c\r\n f0 = lambda x, a, b, c, p, q: a + b * np.exp(p * x) + c * np.exp(q * x)\r\n y = f0(x, 0, 2, 3, -1.6, 2) + np.random.randn(len(x))/10\r\n #a, b, c = fit_exp_with_offset(x, y)\r\n b, c, p, q = fit_double_exp(x, y)\r\n plt.scatter(x, y)\r\n plt.plot(x, f0(x, 0, b, c, p, q), 'r')\r\n plt.title('a={:.3f}, b={:.3f}, c={:.3f}'.format(0, b, c))\r\n\r\n# x = np.arange(-2.5, 7.1, 0.1)\r\n# f0 = lambda x, mu, sigma: 1 / (sigma * np.sqrt(2 * np.pi)) * np.exp( -1/2 * ( (x - mu) / sigma )**2 )\r\n# y = f0(x, 2.08, 3.2) + np.random.randn(len(x))/100\r\n# mu, sigma= fit_gaussian_non_iter(x, y, sort=False)\r\n# plt.scatter(x, y)\r\n# plt.plot(x, f0(x, mu, sigma))\r\n# plt.title(r'$\\mu$={:.3f}, $\\sigma$={:.3f}'.format(mu, sigma))\r\n"
},
{
"alpha_fraction": 0.48249027132987976,
"alphanum_fraction": 0.5171560049057007,
"avg_line_length": 29.590909957885742,
"blob_id": "b9f97c91de1c8a8f63e7af358b3733d78c429e19",
"content_id": "5ca7cc3614bb306c41a57b669118c00fad54fed0",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 2827,
"license_type": "no_license",
"max_line_length": 114,
"num_lines": 88,
"path": "/PySynapse/util/image_util.py",
"repo_name": "sapphire008/Python",
"src_encoding": "UTF-8",
"text": "# -*- coding: utf-8 -*-\r\n\"\"\"\r\nCreated on Sat Jun 27 19:24:54 2015\r\n\r\n@author: Edward\r\n\"\"\"\r\nimport numpy as np\r\nfrom MATLAB import *\r\nfrom ImportData import *\r\nfrom matplotlib import pyplot as plt\r\n\r\nfrom pdb import set_trace\r\n\r\ndef MPI(Img):\r\n \"\"\"Create Maximum Projection Intensity image from a stack of image\r\n Assuming z dimension is the third dimension\"\"\"\r\n return np.squeeze(np.max(Img, axis=2))\r\n\r\ndef FTimeSeries(Img, mask, mode=['F', 'dF','dF/F', 'hist']):\r\n \"\"\"Extract the time series from a image series given binary ROI mask\"\"\"\r\n if 0 < Img.ndim - mask.ndim < 2:\r\n mask = mask[:,:,np.newaxis]\r\n \r\n if isinstance(mode, str):\r\n mode = [mode]\r\n\r\n # Get data at ROI\r\n Img_masked = Img * mask\r\n Img_masked = np.ma.masked_where(Img_masked==0, Img_masked)\r\n # Dimensions\r\n for m in range(Img_masked.ndim-1):\r\n if m == 0:\r\n F = Img_masked\r\n \r\n F = F.mean(axis=0)\r\n \r\n # Calculate \r\n F = np.asarray(F)\r\n Fout = dict()\r\n for m in mode:\r\n if m == 'F':\r\n Fout[m] = F\r\n elif m == 'dF': # from Ben\r\n Fout[m] = F - F[1]\r\n elif m == 'dF/F': # from Ben\r\n Fout[m] = (F - F[1]) / F[1] * 100\r\n elif m == 'hist':\r\n I = Img_masked.compressed()\r\n n, b = np.histogram(I, int(len(I)/10)) # count, bin\r\n b = midpoint(b)\r\n Fout[m] = (n, b) \r\n \r\n return Fout\r\n \r\ndef makeSquareROIMask(roi, m, n):\r\n \"\"\"\r\n roi: roi object\r\n m: number of rows of the canvas\r\n n: number of columns of the canvas\r\n \"\"\"\r\n x = roi.position[:,0].squeeze()\r\n x = np.array([x[0], x[0], x[1], x[1]], x[0])\r\n y = roi.position[:,1].squeeze()\r\n y = np.array([y[0], y[1], y[1], y[0]], y[0])\r\n mask = poly2mask(y, x, m, n)\r\n return mask\r\n\r\nif __name__ == '__main__':\r\n # load the image\r\n img = 'D:/Data/2photon/2016/05.May/Image 4 May 2016/Slice B/Slice B CCh Double.512x200y75F.m1.img'\r\n zImage = ImageData(dataFile=img, old=True)\r\n # Load ROI\r\n roifile = 'D:/Data/2photon/2016/05.May/Image 4 May 2016/Slice B/Slice B Triple Doublet.512x200y75F.m2 ROI.roi'\r\n ROI = ROIData(roifile, old=True)\r\n # Get time series based on ROI\r\n m, n = zImage.Protocol.Height, zImage.Protocol.Width\r\n RESULTS = []\r\n for k, r in enumerate(ROI):\r\n # Convert ROI to mask\r\n x = r.position[:,0].squeeze()\r\n x = np.array([x[0], x[0], x[1], x[1]], x[0])\r\n y = r.position[:,1].squeeze()\r\n y = np.array([y[0], y[1], y[1], y[0]], y[0])\r\n mask = poly2mask(y, x, m, n)\r\n Fout = FTimeSeries(zImage.img, mask, mode=['F','dF','dF/F','hist'])\r\n RESULTS.append( Fout )\r\n \r\n plt.imshow(zImage.img[:,:,0] * (1-mask))\r\n \r\n \r\n "
},
{
"alpha_fraction": 0.5752915143966675,
"alphanum_fraction": 0.5903415083885193,
"avg_line_length": 46.140625,
"blob_id": "8c4869c50640dae96eda2b39ed24f9c3d105c91d",
"content_id": "f227a30309608f4216598f38f4bf85bc6d71efc5",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 12093,
"license_type": "no_license",
"max_line_length": 174,
"num_lines": 256,
"path": "/EMCNA/EM.py",
"repo_name": "sapphire008/Python",
"src_encoding": "UTF-8",
"text": "# -*- coding: utf-8 -*-\n\"\"\"\nCreated on Fri Nov 14 14:03:50 2014\n\nEM: proportion of tumor cells in a sample read\n\n@author: Edward\n\"\"\"\n\nimport numpy as np\nfrom numpy import log\nfrom numpy import sum as nsum\nfrom scipy.misc import comb as nchoosek\nfrom scipy.optimize import fminbound\nimport csv, sys\nfrom optparse import OptionParser\n\ndef EM_parse_opt():\n \"\"\"\n Parse optional arguments to the script\n \"\"\"\n usage = \"usage: %prog [options] shared_snp_count1.txt shared_snp_count2.txt ...\"\n parser = OptionParser(usage)\n parser.add_option(\"-r\",\"--chr2use\", dest=\"chr2use\", help=\"chromosome to use, e.g. chr2,chr7\", default=None)\n parser.add_option(\"-m\",\"--mutation\", dest=\"mutation\", help=\"type of mutation, either UPD or DEL, or specify the copy number in amplification; default DEL\", default='DEL')\n parser.add_option(\"-p\",\"--position\", dest=\"position_range\", help=\"range of positions on chromosome\", default=None)\n parser.add_option(\"-N\", \"--atleastN\", dest=\"N_at_least\", help=\"filter out SNPs conunts less than N\", default=0)\n parser.add_option(\"-k\",\"--maxiter\", dest=\"maxiter\", help=\"maximum number of iterations to run EM. Default 1000\", default=1000)\n parser.add_option(\"-o\",\"--output\", dest=\"output\", help=\"write to an output file, instead of stdout\", default=None)\n parser.add_option(\"-a\",\"--append\", dest=\"append\", help=\"append intead of overwriting the output file\", default=\"store_false\")\n parser.add_option(\"-i\",\"--id\", dest=\"id\", help=\"assign subject id to each input file, e.g. subj01,subj02,subj03\", default=None)\n options, args = parser.parse_args()\n if len(args) < 1:\n\t\tparser.error(\"Input at least 1 SNP count file required\")\n return(options, args)\n\ndef p_read(theta, N, X, B, mutation):\n # Parse the mutation string\n if isinstance(mutation,(str)):\n if mutation.isdigit():\n mutation = float(mutation)\n else:\n mutation = mutation.upper()\n # turn string input into numeric cases\n mutation = {\n 'DEL': 1.0,\n 'UPD': 2.0\n }.get(mutation,mutation)\n # raise exception if the result is not numeric\n if not isinstance(mutation,(int, long, float)):\n raise Exception(\"Unrecognized mutation type %s\" %(mutation))\n # probability of reads based on type of mutation\n if mutation == 1.0: # DEL\n p_A = B / (1.0-theta+B*theta)\n p_B = (B-B*theta) / (1.0-B*theta)\n elif mutation == 2.0: # UPD\n p_A = (B+B*theta) / (1.0-theta+2.0*B*theta)\n p_B = (B-B*theta) / (1.0+theta-2.0*B*theta)\n elif mutation > 2.0: # AMPLIFICATION\n p_A = ((mutation-2.0)*B*theta+theta) / ((mutation-2.0)*B*theta+1.0)\n p_B = ((mutation-1.0)*theta+(2.0-mutation)*B*theta) / ((mutation-2.0)*theta+(2.0-mutation)*B*theta+1.0)\n else: # catch other invalid copy numbers / mutations input\n raise Exception(\"Invalid copy number mutation %d\" %(mutation))\n f_A = nchoosek(N,X) * (p_A**X) * ((1.0-p_A)**(N-X))\n f_B = nchoosek(N,X) * (p_B**X) * ((1.0-p_B)**(N-X))\n return (p_A, p_B, f_A, f_B)\n\ndef EM_Clonal_Abundance(N, X, B=0.5, mutation='DEL', theta_tol=1E-9, maxiter=1000, consecutive_convergence= 10, full_output = False, disp=False):\n \"\"\"\n Inputs:\n N: a vector of total number of reads for each SNP\n X: a vector of total number of allele A reads, same length as N\n B: a vector of read bias, estimated by the number of reads of allele A \n divided by the total number of reads in normal sample. Default is .5\n mutation: type of mutation ['DEL' | 'UPD' | {numeric}], chromosomal \n deletion, uniparental disomy, or copy number (total number of \n copies of the chromosomes) in the case of amplification. \n Default 'DEL'.\n theta_tol (optional): tolerance of theta change for convergence, \n default is 1E-9\n maxiter: maximum number of iterations to run, default is 1000\n consecutive_convergence: number of times that the change of theta has to be\n less than that of theta_tol, consecutively, to be deemed convergence\n full_output: flag to return additional outputs\n disp: display each iteration of EM\n \n Output:\n theta: estimated proprotion of tumor cells (between 0 and 1)\n \n If full_output is set to True, the following can also be returned\n it_count: number of iterations used\n \n \"\"\"\n \n # Initialize / guess parameters\n theta = 0.5\n d_theta = 1.0\n it_count = 0\n d_theta_count = 0\n \n z_A = X/N # probability of z_A\n z_B = 1.0-z_A #probability of z_B\n \n # define objective functions for theta\n # Maximize theta so that the log likelihood is maximized\n # l(theta) = sum_i[sum_zi: Qi_zi*log(p(x_i, z_i; theta)/Qi_zi)]\n # l(theta) = sum_i[z_Ai*log(r_A) + z_Bi*log(r_B) + log(z_Ai*f_A + z_Bi*f_B)]\n def likelihood_func(theta, N, X, B, z_A, z_B, mutation):\n # Likelihood function for theta\n _, _, f_A, f_B = p_read(theta, N, X, B, mutation)\n l = -1.0*nsum(log(z_A*f_A + z_B*f_B))\n return l\n def likelihood_func_deriv(theta, N, X, B, z_A, z_B, mutation):\n # derivative of the likelihood of the function for theta\n p_A, p_B, f_A, f_B = p_read(theta, N, X, B, mutation)\n p_A_deriv = B * (1.0-B) * ((1-theta+B*theta) **(-2.0))\n p_B_deriv = ((1.0-B*theta) * (-B) - (B-B*theta) * (-B))/ ((1.0-B*theta)**2)\n f_A_deriv = nchoosek(N,X) * X *(p_A_deriv) * ((1.0-p_A)**(N-X)) + nchoosek(N,X) * (p_A**X) * (N-X) * (-p_A_deriv)\n f_B_deriv = nchoosek(N,X) * X *(p_B_deriv) * ((1.0-p_B)**(N-X)) + nchoosek(N,X) * (p_B**X) * (N-X) * (-p_B_deriv)\n l_deriv = -1.0*nsum(1.0/(z_A*f_A + z_B*f_B) * (z_A * f_A_deriv + z_B * f_B_deriv))\n return l_deriv\n \n while d_theta_count <= consecutive_convergence and it_count <= maxiter:\n it_count += 1\n # M-Step\n \n # r_A, r_B part\n # r_A = sum_i2K(z_Ai/N)\n r_A = nsum(z_A / np.size(z_A))\n r_B = nsum(z_B / np.size(z_B))\n\n # Maximize the log likelihood to estimate for theta, (minimize the \n # negative of the log-likelihood)\n xopt, fval, ierr, numfunc = fminbound(likelihood_func, 0.0, 1.0, args=(N, X, B, z_A, z_B, mutation), full_output=True)\n \n if disp:\n print(\"theta:%f, fval:%f, ierr:%d, numfunc:%d\" %(xopt, fval, ierr, numfunc))\n # returns a new theta\n d_theta = np.abs(xopt - theta)\n theta = xopt\n \n if d_theta<theta_tol:\n d_theta_count += 1\n else:# if not consecutive convergence, set convergence count to zero\n d_theta_count = 0 \n \n # E-Step\n # Set Q_i(Z_i) = p(z_i =1 | x_i ; theta)\n \n # Recalculate probabilities \n _, _, f_A, f_B = p_read(theta, N, X, B, mutation)\n f_X = r_A * f_A + r_B * f_B\n \n z_A = r_A * f_A / f_X\n z_B = 1.0 - z_A\n # end of while loop\n if it_count > maxiter and d_theta_count < 1:\n print(\"Theta not converged!\")\n if full_output:\n return (theta, it_count)\n else:\n return theta\n\n# Read in files\ndef EM_read_shared_snp_file(shared_snp_file, chr2use=None, position_range=None, \n pseudo_count=True, print_result=False, N_at_least=0, delimiter='\\t'):\n \"\"\"\n Read in an SNP call file, and calcualte parameters necessary for EM\n Inputs:\n shared_snp_file: delimited file (default tab delimited) with each row:\n chromosome, SNP position, reference normal, alternative normal, \n reference normal count, alternative normal count, reference tumor,\n alternative tumor, reference tumor count, alternative tumor count\n chr2use: filter chromosomes to retain in the analysis\n position_range: range of SNP position [min, max]\n pseudo_count: avoid zero reads when calcualting bias term by adding all\n the counts by 1 (True | False)\n print_result: print (filtered) result of calculations for each SNP\n N_at_least: only read in SNPs with total count at least this number.\n Default 0, read in everything\n delimiter: default '\\t' tab delimited\n Outputs: return the following as a tuple\n ch: chromosome name\n pos: position of SNP\n N: total number of reads in tumor cells\n X: total number of read of reference in tumor cells\n B: bias of reaading according to normal cell SNPs\n \"\"\"\n ch, pos, N, X, B = [], [], np.array([]), np.array([]), np.array([])\n with open(shared_snp_file, 'rb') as FID:\n csvfile = csv.reader(FID, delimiter=delimiter)\n for i, row in enumerate(csvfile):\n c, p, ref_norm, alt_norm, ref_norm_count, alt_norm_count, ref_tum, alt_tum, ref_tum_count, alt_tum_count = row\n p, ref_norm_count, alt_norm_count, ref_tum_count, alt_tum_count = int(p), float(ref_norm_count), float(alt_norm_count), float(ref_tum_count), float(alt_tum_count)\n # apply filter\n if (chr2use is not None) and (c not in chr2use):\n continue\n if (position_range is not None) and (position_range[0] > p or position_range[1] < p):\n continue\n if (ref_tum_count + alt_tum_count < N_at_least):\n continue\n if (ref_norm_count<1E-6 or alt_norm_count<1E-6) and pseudo_count:\n ref_norm_count, alt_norm_count = ref_norm_count+1.0, alt_norm_count+1.0\n ch.append(c)\n pos.append(int(p))\n N = np.append(N, ref_tum_count + alt_tum_count)\n X = np.append(X, ref_tum_count + pseudo_count)\n B = np.append(B, ref_norm_count / (ref_norm_count + alt_norm_count))\n if print_result:\n print(\"%s, %d, N:%d, X:%d, B:%f\\n\" %(ch[-1], pos[-1], N[-1], X[-1], B[-1]))\n # give warnings if reading is very biased\n if any(B < 1E-6) or any(B > 1-1E-6):\n print('Some readings are very biased: %s' %(B))\n FID.close()\n return (ch, pos, N, X, B)\n \ndef EM_main2():\n \"\"\"\n Estimate tumor percent given data\n \"\"\"\n options, file_lists = EM_parse_opt()\n if options.chr2use is not None:\n options.chr2use = list([r for r in options.chr2use.split(',')])\n if options.position_range is not None:\n options.position_range=np.array([int(p) for p in options.position_range.split(',')])\n if options.id is None:\n options.id = np.arange(1,len(file_lists)+1, 1)\n else:\n options.id = list([sid for sid in options.id.split(',')])\n # check if there are the same number of ids as input files\n if len(file_lists) != len(options.id):\n # use numerical indices\n options.id = np.arange(1,len(file_lists)+1, 1)\n if options.output is not None:\n if options.append:\n FID = open(options.output,'a')\n else:\n FID = open(options.output, 'wb')\n csvfile = csv.writer(FID,delimiter='\\t')\n csvfile.writerow([\"subject\",\"nSNPs\", \"chromosome\",\"mutation\",\"theta\"])\n else:\n sys.stdout.flush() # make sure it prints to shell\n print(\"\\t\".join([\"subject\",\"nSNPs\", \"chromosome\",\"mutation\",\"theta\"]))\n for s, f in enumerate(file_lists):\n ch, pos, N, X, B = EM_read_shared_snp_file(f, options.chr2use, options.position_range, pseudo_count=True, print_result=False, N_at_least=int(options.N_at_least)) \n theta, it_count = EM_Clonal_Abundance(N, X, B, mutation = options.mutation, theta_tol=1E-9, maxiter=1000, full_output = True, disp=False)\n if options.output is None:\n print(\"\\t\".join([options.id[s], np.size(ch), ','.join(np.unique(ch)), options.mutation, theta]))\n else:\n csvfile.writerow([options.id[s], np.size(ch), ','.join(np.unique(ch)), options.mutation, theta])\n if options.output is not None:\n FID.close()\n\n# Suppress output if imported as module\nif __name__==\"__main__\":\n EM_main2()\n \n \n \n \n \n"
},
{
"alpha_fraction": 0.6222810745239258,
"alphanum_fraction": 0.6324745416641235,
"avg_line_length": 46.478599548339844,
"blob_id": "747bffac4e6af595febb2f4538a19309f72bb9ed",
"content_id": "5fd93fe1977282577ca7053c76638a379c2539c1",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 12469,
"license_type": "no_license",
"max_line_length": 243,
"num_lines": 257,
"path": "/Pycftool/FitOptions.py",
"repo_name": "sapphire008/Python",
"src_encoding": "UTF-8",
"text": "\"\"\"\r\nA variety of interfaces for fit options\r\n\r\n\"\"\"\r\nimport sys\r\nimport os\r\n\r\nsys.path.append(os.path.abspath(os.path.join(os.path.dirname(__file__), \"../generic/\")))\r\n\r\nfrom MATLAB import *\r\n\r\nfrom PyQt5 import QtCore, QtGui, QtWidgets\r\nfrom QCodeEdit import QCodeEdit\r\nfrom ElideQLabel import ElideQLabel\r\n\r\n\r\nimport numpy as np\r\n\r\n\r\ndef str2numericHandleError(x):\r\n \"\"\"Wrapper fir str2numeric. Return original string instead of error \"\"\"\r\n try:\r\n return str2numeric(x)\r\n except:\r\n return x\r\n\r\nclass FitOptions(QtWidgets.QWidget):\r\n def __init__(self, parent=None, friend=None, method='2D: curve_fit', coefficients=['a', 'b', 'c']):\r\n super(FitOptions, self).__init__(parent)\r\n self.setWindowTitle(\"Fit Options\")\r\n self.parent = parent\r\n self.friend = friend\r\n self.isclosed = True # start off being closed\r\n self.method = method\r\n self.params = {'method': method}\r\n self.coefficients = coefficients\r\n\r\n # Set up GUI\r\n self.setLayout(QtWidgets.QVBoxLayout())\r\n\r\n # buttons for saving the settings and exiting the options window\r\n OK_button = QtWidgets.QPushButton('OK')\r\n OK_button.setDefault(True)\r\n OK_button.clicked.connect(lambda: self.updateSettings(closeWidget=True))\r\n Apply_button = QtWidgets.QPushButton('Apply')\r\n Apply_button.clicked.connect(lambda: self.updateSettings(closeWidget=False))\r\n Cancel_button = QtWidgets.QPushButton('Cancel')\r\n Cancel_button.clicked.connect(self.close)\r\n self.buttonGroup = QtWidgets.QGroupBox()\r\n self.buttonGroup.setLayout(QtWidgets.QHBoxLayout())\r\n self.buttonGroup.layout().addWidget(OK_button, 0)\r\n self.buttonGroup.layout().addWidget(Apply_button, 0)\r\n self.buttonGroup.layout().addWidget(Cancel_button, 0)\r\n\r\n # Populate the fit options regions with appropriate field\r\n self.initializeFitOptionsWidget()\r\n\r\n self.layout().addWidget(self.widgets)\r\n self.layout().addWidget(self.buttonGroup)\r\n\r\n def setMethod(self, method):\r\n if self.method == method:\r\n return\r\n self.method = method\r\n # remove the current widget\r\n self.layout().removeWidget(self.widgets)\r\n self.widgets.deleteLater()\r\n # re-initialize self.widgets\r\n self.initializeFitOptionsWidget()\r\n # Replace the current widget\r\n self.layout().insertWidget(0, self.widgets)\r\n # print('reset method to --> {}'.format(method))\r\n\r\n def setInitializationParameters(self, coefficients=['a','b','c','d']):\r\n self.coefficients = coefficients\r\n coef_table = QtWidgets.QTableWidget(0, 4)\r\n coef_table.verticalHeader().setVisible(False)\r\n coef_table.setHorizontalHeaderLabels(['Coefficients', 'StartingPoint', 'Lower', 'Upper'])\r\n coef_table.itemChanged.connect(lambda: self.refitCurve())\r\n coef_table.blockSignals(True) # blocking any signal while setting data\r\n\r\n coef_flags = QtCore.Qt.ItemIsEnabled | QtCore.Qt.ItemIsEditable\r\n\r\n for n, pp in enumerate(coefficients):\r\n coef_table.insertRow(n) # add a row\r\n # Coefficient label\r\n coef_label = QtWidgets.QTableWidgetItem(pp)\r\n # Initial Value\r\n initVal_item = QtWidgets.QTableWidgetItem(\"{:.5f}\".format(np.random.rand()))\r\n initVal_item.setFlags(coef_flags)\r\n lower_item = QtWidgets.QTableWidgetItem(str(-np.inf))\r\n lower_item.setFlags(coef_flags)\r\n upper_item = QtWidgets.QTableWidgetItem(str(np.inf))\r\n upper_item.setFlags(coef_flags)\r\n\r\n coef_table.setItem(n, 0, coef_label)\r\n coef_table.setItem(n, 1, initVal_item)\r\n coef_table.setItem(n, 2, lower_item)\r\n coef_table.setItem(n, 3, upper_item)\r\n\r\n coef_table.blockSignals(False) # release signal block\r\n\r\n # Resize table cells to fit contents\r\n coef_table.setSizeAdjustPolicy(QtWidgets.QAbstractScrollArea.AdjustToContents)\r\n coef_table.resizeColumnsToContents()\r\n coef_table.resizeRowsToContents()\r\n\r\n # Resetting parameter associated with coef table, if already initialized\r\n if hasattr(self, 'widgets') and hasattr(self.widgets, 'params'):\r\n # Delete the table\r\n table_row = self.widgets.layout().rowCount()-1\r\n self.widgets = self.friend.removeFromWidget(self.widgets, row=table_row)\r\n # Add back the new table\r\n self.widgets.layout().addWidget(coef_table, table_row, 0, 1, 2)\r\n # Update params\r\n self.widgets.params['coefficients'] = coef_table\r\n\r\n return coef_table\r\n\r\n def initializeFitOptionsWidget(self):\r\n if self.method == '2D: curve_fit': # General call to curve_fit\r\n self.widgets = self.curveFitOptionWidgets()\r\n elif self.method == '2D: gmm': # Gaussian mixture model\r\n self.widgets = QtWidgets.QGroupBox()\r\n elif self.method == '2D: fourier': # Mixture of fourier\r\n self.widgets = QtWidgets.QGroupBox()\r\n elif self.method == '3D: gmm': # 3D Gaussian mixture model\r\n self.widgets = QtWidgets.QGroupBox()\r\n else:\r\n self.widgets = QtWidgets.QGroupBox()\r\n\r\n def curveFitOptionWidgets(self):\r\n widgets = QtWidgets.QGroupBox()\r\n widgets.setLayout(QtWidgets.QGridLayout())\r\n\r\n # <editor-fold desc=\"Options\">\r\n method_label = QtWidgets.QLabel(\"Method:\")\r\n method_detail_label = QtWidgets.QLabel(\"NonlinearLeastSquares\")\r\n algorithm_label = QtWidgets.QLabel(\"Algorithm:\")\r\n algorithm_comboBox = QtWidgets.QComboBox()\r\n algorithm_comboBox.addItems([\"Trust-Region Reflective\", \"Levenberg-Marquardt\", \"Dog-Box\"])\r\n algorithm_comboBox.setItemData(0, \"Trust Region Reflective (TRF) algorithm,\\nparticularly suitable for large sparse problems with bounds.\\nGenerally robust method.\", QtCore.Qt.ToolTipRole)\r\n algorithm_comboBox.setItemData(1, \"Levenberg-Marquardt (LM) algorithm as implemented in MINPACK.\\nDoesn’t handle bounds and sparse Jacobians.\\nUsually the most efficient method for small unconstrained problems.\", QtCore.Qt.ToolTipRole)\r\n algorithm_comboBox.setItemData(2, \"Dogleg algorithm (dogbox) with rectangular trust regions,\\ntypical use case is small problems with bounds.\\nNot recommended for problems with rank-deficient Jacobian.\", QtCore.Qt.ToolTipRole)\r\n algorithm_comboBox.setToolTip(\"Expand to see details\")\r\n algorithm_comboBox.currentIndexChanged.connect(lambda: self.refitCurve())\r\n maxfev_label = QtWidgets.QLabel(\"maxfeval:\")\r\n maxfev_LineEdit = QtWidgets.QLineEdit(\"None\")\r\n maxfev_LineEdit.setToolTip(\"The maximum number of calls to the function.\\nSet to 0 for automatic calculation.\")\r\n maxfev_LineEdit.returnPressed.connect(lambda: self.refitCurve())\r\n loss_label = QtWidgets.QLabel(\"loss:\")\r\n loss_comboBox = QtWidgets.QComboBox()\r\n loss_comboBox.addItems(['linear', 'soft_l1', 'huber', 'cauchy', 'arctan'])\r\n loss_comboBox.setToolTip(\"Determines the loss function\")\r\n loss_comboBox.setItemData(0, \"rho(z) = z.\\nGives a standard least-squares problem.\", QtCore.Qt.ToolTipRole)\r\n loss_comboBox.setItemData(1, \"rho(z) = 2 * ((1 + z)**0.5 - 1).\\nThe smooth approximation of l1 (absolute value) loss.\\nUsually a good choice for robust least squares.\", QtCore.Qt.ToolTipRole)\r\n loss_comboBox.setItemData(2, \"rho(z) = z if z <= 1 else 2*z**0.5 - 1.\\nWorks similarly to ‘soft_l1’.\", QtCore.Qt.ToolTipRole)\r\n loss_comboBox.setItemData(3, \"rho(z) = ln(1 + z).\\nSeverely weakens outliers influence,\\nbut may cause difficulties in optimization process.\", QtCore.Qt.ToolTipRole)\r\n loss_comboBox.setItemData(4, \"rho(z) = arctan(z).\\nLimits a maximum loss on a single residual,\\nhas properties similar to ‘cauchy’.\", QtCore.Qt.ToolTipRole)\r\n loss_comboBox.currentIndexChanged.connect(lambda: self.refitCurve())\r\n ftol_label = QtWidgets.QLabel('ftol:')\r\n ftol_lineEdit = QtWidgets.QLineEdit('1.0e-08')\r\n ftol_lineEdit.setToolTip(\"Relative error desired in the sum of squares.\")\r\n ftol_lineEdit.returnPressed.connect(lambda: self.refitCurve())\r\n xtol_label = QtWidgets.QLabel(\"xtol:\")\r\n xtol_LineEdit = QtWidgets.QLineEdit('1.0e-08')\r\n xtol_LineEdit.setToolTip(\"Relative error desired in the approximate solution.\")\r\n xtol_LineEdit.returnPressed.connect(lambda: self.refitCurve())\r\n gtol_label = QtWidgets.QLabel(\"gtol:\")\r\n gtol_LineEdit = QtWidgets.QLineEdit(\"1.0e-08\")\r\n gtol_LineEdit.setToolTip(\"Orthogonality desired between the function vector\\nand the columns of the Jacobian.\")\r\n gtol_LineEdit.returnPressed.connect(lambda: self.refitCurve())\r\n # </editor-fold>\r\n\r\n # Coefficients table\r\n coef_table = self.setInitializationParameters(coefficients=self.coefficients)\r\n\r\n # <editor-fold desc=\"Adding widgets\">\r\n widgets.layout().addWidget(method_label, 0, 0, 1, 1)\r\n widgets.layout().addWidget(method_detail_label, 0, 1, 1, 1)\r\n widgets.layout().addWidget(algorithm_label, 1, 0, 1, 1)\r\n widgets.layout().addWidget(algorithm_comboBox, 1, 1, 1, 1)\r\n widgets.layout().addWidget(loss_label, 2, 0, 1, 1)\r\n widgets.layout().addWidget(loss_comboBox, 2, 1, 1, 1)\r\n widgets.layout().addWidget(maxfev_label, 3, 0, 1, 1)\r\n widgets.layout().addWidget(maxfev_LineEdit, 3, 1, 1, 1)\r\n widgets.layout().addWidget(ftol_label, 4, 0, 1, 1)\r\n widgets.layout().addWidget(ftol_lineEdit, 4, 1, 1, 1)\r\n widgets.layout().addWidget(xtol_label, 5, 0, 1, 1)\r\n widgets.layout().addWidget(xtol_LineEdit, 5, 1, 1, 1)\r\n widgets.layout().addWidget(gtol_label, 6, 0, 1, 1)\r\n widgets.layout().addWidget(gtol_LineEdit, 6, 1, 1, 1)\r\n widgets.layout().addWidget(coef_table, 7, 0, 1, 2)\r\n\r\n widgets.params = {'algorithm': algorithm_comboBox,\r\n 'loss': loss_comboBox,\r\n 'maxfev': maxfev_LineEdit,\r\n 'ftol': ftol_lineEdit,\r\n 'xtol': xtol_LineEdit,\r\n 'gtol': gtol_LineEdit,\r\n 'coefficients': coef_table}\r\n\r\n # </editor-fold>\r\n\r\n return widgets\r\n\r\n def gmmFitOptionWidgets(self):\r\n widgets = QtWidgets.QGroupBox()\r\n widgets.setLayout(QtWidgets.QGridLayout())\r\n\r\n def getParams(self):\r\n \"\"\"From self.widget\"\"\"\r\n for key, widget in self.widgets.params.items():\r\n if isinstance(widget, QtWidgets.QLineEdit):\r\n val = str2numericHandleError(widget.text())\r\n elif isinstance(widget, QtWidgets.QComboBox):\r\n val = str2numericHandleError(widget.currentText())\r\n elif isinstance(widget, QtWidgets.QLabel):\r\n val = str2numericHandleError(widget.text())\r\n elif isinstance(widget, QtWidgets.QCheckBox):\r\n val = True if widget.checkState() > 0 else False\r\n elif isinstance(widget, QtWidgets.QSpinBox):\r\n val = widget.value()\r\n\r\n elif isinstance(widget, QtWidgets.QTableWidget):\r\n val = {}\r\n numcols = widget.columnCount()\r\n for r in range(widget.rowCount()):\r\n current_coef = widget.item(r, 0).text()\r\n val[current_coef] = [str2numericHandleError(widget.item(r, c).text()) for c in range(1, numcols)]\r\n else:\r\n raise (TypeError('Unrecognized type of setting item'))\r\n\r\n self.params[key] = val\r\n\r\n def refitCurve(self):\r\n print('refitting...')\r\n if self.friend is None or not self.friend.autofit:\r\n return # don't do anything if not auto fitting\r\n#\r\n self.friend.curveFit()\r\n\r\n def updateSettings(self, closeWidget=False):\r\n self.getParams() # Get the parameters\r\n if closeWidget:\r\n self.isclosed = True\r\n self.close()\r\n\r\n def closeEvent(self, event):\r\n \"\"\"Override default behavior when closing the main window\"\"\"\r\n self.isclosed = True\r\n\r\nif __name__ == '__main__':\r\n app = QtWidgets.QApplication(sys.argv)\r\n ex = FitOptions()\r\n ex.show()\r\n sys.exit(app.exec_())\r\n"
},
{
"alpha_fraction": 0.6031789779663086,
"alphanum_fraction": 0.6119484901428223,
"avg_line_length": 28.66386604309082,
"blob_id": "a98006ee64f93ae16ec45293fabaadacfe01ce4a",
"content_id": "bbb606f06ec144079e2368df7f1f02763dad901a",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 3649,
"license_type": "no_license",
"max_line_length": 79,
"num_lines": 119,
"path": "/generic/SPARK.py",
"repo_name": "sapphire008/Python",
"src_encoding": "UTF-8",
"text": "# -*- coding: utf-8 -*-\r\n\"\"\"\r\nCreated on Thu Feb 13 14:25:03 2020\r\n\r\nSnippets for Spark\r\n\r\n@author: cudo9001\r\n\"\"\"\r\nimport os\r\nimport pandas as pd\r\nimport dask as dd\r\nfrom functools import reduce\r\nfrom pyspark.sql import DataFrame\r\nfrom pyspark.sql import functions as F\r\n\r\n# Union all dataframes\r\ndef expr(mycols, allcols):\r\n \"\"\"\r\n Fill missing columns with nan (map)\r\n For unionAll(columns, df1, df2, df3)\r\n \"\"\"\r\n def processCols(colname):\r\n if colname in mycols:\r\n return colname\r\n else:\r\n return F.lit(None).alias(colname)\r\n cols = map(processCols, allcols)\r\n return list(cols)\r\n\r\ndef unionAll(columns, *dfs):\r\n \"\"\"\r\n Union all dataframes (reduce)\r\n \"\"\"\r\n return reduce(DataFrame.union, \r\n [df.select(expr(df.columns, columns)) for df in dfs])\r\n\r\n# Faster and more memory efficient toPandas\r\ndef _map_to_pandas(rdds):\r\n \"\"\" Needs to be here due to pickling issues \"\"\"\r\n return [pd.DataFrame(list(rdds))]\r\n\r\ndef toPandas(df, n_partitions=None):\r\n \"\"\"\r\n Returns the contents of `df` as a local `pandas.DataFrame` in a \r\n speedy fashion. The DataFrame is repartitioned if `n_partitions` is passed.\r\n :param df: pyspark.sql.DataFrame\r\n :param n_partitions: int or None\r\n :return: pandas.DataFrame\r\n \"\"\"\r\n if n_partitions is not None: df = df.repartition(n_partitions)\r\n df_pand = df.rdd.mapPartitions(_map_to_pandas).collect()\r\n df_pand = pd.concat(df_pand)\r\n df_pand.columns = df.columns\r\n return df_pand\r\n\r\n# Spark to Dask dataframe\r\ndef _map_to_dask(rdds):\r\n return [dd.DataFrame(list(rdds))]\r\n\r\ndef toDask(df, n_partitions=None):\r\n \"\"\"\r\n Returns the content of `df` as a Dask Dataframe in a speedy fashion.\r\n The DataFrame is repartitioned if `n_partitions` is passed.\r\n :param df: pyspark.sql.DataFrame\r\n :param n_partitions: int or None\r\n :return: Dask dataframe\r\n \"\"\"\r\n raise(NotImplementedError('This method has not been fully implemented'))\r\n if n_partitions is not None: df = df.repartition(n_partitions)\r\n df_dd = df.rdd.mapPartitions(_map_to_dask).collect()\r\n return df_dd\r\n \r\n\r\ndef countNullCols(df, cols=[]):\r\n \"\"\"\r\n Reduce for df.where(F.col(x).isNull()).count()\r\n\r\n \"\"\"\r\n if len(cols) < 1: # All columns\r\n return df.where(reduce(lambda x, y: x | y, \\\r\n (F.col(x).isNull() for x in df.columns))).count()\r\n elif len(cols) == 1:\r\n return df.where(F.col(cols[0]).isNull()).count()\r\n else: # len(cols)>1:\r\n return df.where(reduce(lambda x, y: x | y, \\\r\n (F.col(x).isNull() for x in cols))).count()\r\n \r\nfrom pyspark.sql.types import *\r\n\r\n# Auxiliar functions\r\n# Pandas Types -> Sparks Types\r\ndef equivalent_type(f):\r\n if f == 'datetime64[ns]':\r\n return DateType()\r\n elif f == 'int64':\r\n return LongType()\r\n elif f == 'int32':\r\n return IntegerType()\r\n elif f == 'float64':\r\n return FloatType()\r\n else:\r\n return StringType()\r\n\r\ndef define_structure(string, format_type):\r\n try: \r\n typo = equivalent_type(format_type)\r\n except: \r\n typo = StringType()\r\n return StructField(string, typo)\r\n\r\n#Given pandas dataframe, it will return a spark's dataframe\r\ndef pandas_to_spark(df_pandas):\r\n columns = list(df_pandas.columns)\r\n types = list(df_pandas.dtypes)\r\n struct_list = []\r\n for column, typo in zip(columns, types): \r\n struct_list.append(define_structure(column, typo))\r\n p_schema = StructType(struct_list)\r\n return sqlContext.createDataFrame(df_pandas, p_schema)\r\n"
},
{
"alpha_fraction": 0.569767415523529,
"alphanum_fraction": 0.6018151044845581,
"avg_line_length": 31.903846740722656,
"blob_id": "c92bb0ff6ad5cbad63e662eb6cb287212b7d32bc",
"content_id": "de6831005cc889e294a01940b362b1ba943e0a30",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 3526,
"license_type": "no_license",
"max_line_length": 118,
"num_lines": 104,
"path": "/init_py.py",
"repo_name": "sapphire008/Python",
"src_encoding": "UTF-8",
"text": "# -*- coding: utf-8 -*-\r\n\"\"\"\r\nCreated on Fri May 6 09:44:12 2016\r\n\r\n@author: Edward\r\n\"\"\"\r\n\r\n# matplotlib.use('PS')\r\nimport sys\r\nimport os\r\n\r\n# parsing mode\r\nif len(sys.argv)>1:\r\n if any(['ipy' in argv.lower() for argv in sys.argv]) or \\\r\n any(['jupyter' in argv.lower() for argv in sys.argv]) or \\\r\n __file__ in sys.argv: # %run magic command\r\n MODE = 'jupyter'\r\n else: # assume this is called from the command line\r\n MODE = sys.argv[1]\r\nelse:\r\n MODE = 'academic'\r\n\r\nimport matplotlib\r\nif sys.platform == 'darwin':\r\n PYPKGPATH = os.path.dirname(os.path.realpath(__file__))\r\nelse:\r\n PYPKGPATH = 'D:/Edward/Documents/Assignments/Scripts/Python/'\r\n# PYPKGPATH = '/Users/edward/Documents/Scripts/Python'\r\nsys.path.append(PYPKGPATH)\r\nimport glob\r\npkg_list = [os.path.basename(os.path.normpath(p)) for p in glob.glob(PYPKGPATH+'/*') if os.path.isdir(p)]\r\n\r\ndef addpythonpkg(pkg=None, recursively=False):\r\n \"\"\"Instead of doing complicated import, simply add the path of custom scripts\"\"\"\r\n if pkg is None:\r\n # print(pkg_list)\r\n return pkg_list\r\n if isinstance(pkg, str):\r\n pkg = [pkg]\r\n # add packages\r\n for p in pkg:\r\n if p in pkg_list and p not in sys.path:\r\n if recursively:\r\n [sys.path.append(os.path.join(PYPKGPATH, x[0])) for x in os.walk(p)]\r\n else:\r\n sys.path.append(os.path.join(PYPKGPATH, p))\r\n else:\r\n raise(Exception('package %s is not available. Check spelling'%p))\r\n\r\n\r\ndef getaddedpkgs():\r\n \"\"\"List packages that has been added\"\"\"\r\n added_paths = []\r\n for p in sys.path:\r\n normpath = os.path.basename(os.path.normpath(p))\r\n if normpath in pkg_list and normpath not in added_paths:\r\n added_paths.append(normpath)\r\n\r\n return added_paths\r\n\r\ndef getaddedmodules():\r\n return sys.modules.keys()\r\n\r\n#def cd(path=None):\r\n# \"\"\"overloading cd\"\"\"\r\n# if not isinstance(path,str):\r\n# return\r\n# elif path.lower()== 'home':\r\n# os.chdir(PYPKGPATH)\r\n# elif path.lower()== '..':\r\n# os.chdir(os.path.dirname(os.getcwd()))\r\n# else:\r\n# os.chdir(path)\r\n\r\nglobal tableau10\r\nglobal FONT\r\nglobal FONTSIZE\r\n\r\nif MODE == 'academic':\r\n print('academic mode')\r\n # Do some generic import\r\n addpythonpkg(['Spikes', 'generic', 'Plots'])\r\n import pandas as pd\r\n import numpy as np\r\n from spk_util import *\r\n from MATLAB import *\r\n from ImportData import *\r\n from plots import *\r\n # Added the platelet tableau10\r\n tableau10 = [\"#1f77b4\",\"#ff7f0e\", \"#2ca02c\",\"#d62728\",\"#9467bd\",\"#8c564b\",\"#e377c2\",\"#7f7f7f\",\"#bcbd22\",\"#17becf\"]\r\n FONT = 'D:/Edward/Documents/Assignments/Scripts/Python/PySynapse/resources/Helvetica.ttf'\r\n FONTSIZE = {'title':10, 'xlab':8, 'ylab':8, 'xtick':6,'ytick':6,'texts':6,\r\n 'legend': 6, 'legendtitle':6, 'xminortick':6, 'yminortick':6} # font size\r\nelif MODE == 'jupyter':\r\n print('jupyter mode')\r\n addpythonpkg(['generic', 'Plots'])\r\n import pandas as pd\r\n import numpy as np\r\n from MATLAB import *\r\n from plots import *\r\n tableau10 = [\"#1f77b4\",\"#ff7f0e\", \"#2ca02c\",\"#d62728\",\"#9467bd\",\"#8c564b\",\"#e377c2\",\"#7f7f7f\",\"#bcbd22\",\"#17becf\"]\r\n FONT = 'D:/Edward/Documents/Assignments/Scripts/Python/PySynapse/resources/Helvetica.ttf'\r\n FONTSIZE = {'title':12, 'xlab':10, 'ylab':10, 'xtick':8,'ytick':8,'texts':8,\r\n 'legend': 8, 'legendtitle':8, 'xminortick':8, 'yminortick':8} # font size\r\n"
},
{
"alpha_fraction": 0.7706422209739685,
"alphanum_fraction": 0.7706422209739685,
"avg_line_length": 20.799999237060547,
"blob_id": "a515f7e4561e7fefb6ff7f8c35c6df881d53a2dd",
"content_id": "c0092ac5e90710eb6f45832aa11b6318a4882188",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 109,
"license_type": "no_license",
"max_line_length": 23,
"num_lines": 5,
"path": "/Spikes/spikedetekt2/spikedetekt2/processing/__init__.py",
"repo_name": "sapphire008/Python",
"src_encoding": "UTF-8",
"text": "from filtering import *\nfrom threshold import *\nfrom graph import *\nfrom pca import *\nfrom waveform import *\n"
},
{
"alpha_fraction": 0.5460829734802246,
"alphanum_fraction": 0.578341007232666,
"avg_line_length": 31.120370864868164,
"blob_id": "7ede367f8d16a35eb943ee04e56e12c65f0b9d2f",
"content_id": "4d5608da05d6eb1cb6cba83f672de7c0f099935b",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 3472,
"license_type": "no_license",
"max_line_length": 89,
"num_lines": 108,
"path": "/generic/batchwise_set_operations_count.py",
"repo_name": "sapphire008/Python",
"src_encoding": "UTF-8",
"text": "#!/usr/bin/env python3\n# -*- coding: utf-8 -*-\n\"\"\"\nCreated on Thu Jul 27 17:26:24 2023\n\n@author: edwardcui\n\"\"\"\n\nimport numpy as np\nimport scipy.sparse\n\ndef set_operation_counts(x, y, operation=\"intersection\", pad=None):\n \"\"\"Batch-wise count after set operations\"\"\"\n # Input shapes\n n_d, m_x = x.shape\n n_d, m_y = y.shape\n\n # Use np.unique to create convert from data -> indices\n # This can appropriately handle all data types, including strings\n unique, indices = np.unique(np.hstack((x, y)), return_inverse=True)\n n_unique = len(unique)\n if pad is not None:\n pad_index = np.where(unique == pad)[0][0]\n else:\n pad_index = -1\n\n # From flattened index -> original shape\n indices = indices.reshape(n_d, -1)\n indices_x = indices[:, :m_x]\n indices_y = indices[:, m_x:]\n \n # Use coo format to create to create binary indicator matrices\n # e.g. index = [1, 3], n_unique = 5 -> [0, 1, 0, 1, 0]\n def _create_coo_matrix_from_dense_idx(idx, m):\n r = np.repeat(np.arange(n_d), m) # row index\n c = idx.ravel() # flatten\n data = np.ones_like(r, dtype=int)\n data[c==pad_index] = 0 # filter out pad index\n return scipy.sparse.coo_matrix((data, (r, c)), shape=(n_d, n_unique), dtype=int)\n \n def _create_coo_matrix_from_sparse_idx(idx, m):\n idx = idx.tocoo() # convert to coo format from other formats\n r = idx.row\n c = idx.data\n data = np.ones_like(r, dtype=int)\n data[c==pad_index] = 0 # filter out pad index\n return scipy.sparse.coo_mamtrix((data, (r, c)), shape=(n_d, n_unique), dtype=int)\n \n x_hat = _create_coo_matrix_from_dense_idx(indices_x, m_x)\n y_hat = _create_coo_matrix_from_dense_idx(indices_y, m_y)\n \n # set operations in binary\n if operation == \"intersection\":\n res = x_hat.multiply(y_hat)\n elif operation == \"union\":\n res = x_hat + y_hat\n res.data = np.minimum(res.data, 1)\n elif operation == \"difference\":\n res = x_hat - y_hat\n res.data = np.maximum(res.data, 0)\n\n return res.sum(axis=1).A.ravel()\n\n\nx = np.array([[1,2,3,4,5], [2,3,4,5,6], [3, 5, 1, 0, 0]]*1024)\ny = np.array([[5,6,7,8,9], [2,3,5,7,8], [3, 1, 0, 0, 0]]*1024)\n \n%timeit res_sparse = set_operation_counts(x, y, operation=\"intersection\", pad=0)\n\n# %% normal for loop\nimport numpy as np\n\ndef batch_wise_intersection(x, y, pad=None):\n batch_size = x.shape[0]\n res = [[]] * batch_size\n for ii in range(batch_size):\n res[ii] = set(x[ii, :]).intersection(set(y[ii, :]))\n if pad is not None:\n res[ii] = res[ii].difference(set([pad]))\n \n return res\n\nx = np.array([[1,2,3,4,5], [2,3,4,5,6], [3, 5, 1, 0, 0]]*1024)\ny = np.array([[5,6,7,8,9], [2,3,5,7,8], [3, 1, 0, 0, 0]]*1024)\n\n%timeit res = batch_wise_intersection(x, y, pad=0)\n\n\n# %% tensorflow\nimport tensorflow as tf\n\n%timeit res_tf = tf.sets.size(tf.sets.intersection(x, y))\n\n#%% indicator\n\nx = np.array([[\"a\", \"b\", \"c\", \"d\"], [\"b\",\"c\", \"d\", \"e\"]])\ny = np.array([[\"b\", \"a\", \"c\", \"e\"], [\"b\", \"a\", \"\", \"\"]])\nn_x, n_d = x.shape\nn_y, n_d = y.shape\nvalues, ix = np.unique(np.vstack((x,y)), return_inverse=True)\nn_unique = len(values)\n\n# reshape the inverse array. ix_x_hat will be an array the same size\n# as x, where ix_x_hat[i,j] gives the index of x[i,j] in values. That\n# is to say, values[ix_x_hat[i,j]] == x[i,j]\nix_hat = ix.reshape(-1, x.shape[1])\nix_x_hat = ix_hat[:n_x]\nix_y_hat = ix_hat[n_x:]\n\n\n\n"
},
{
"alpha_fraction": 0.6972017884254456,
"alphanum_fraction": 0.7119293212890625,
"avg_line_length": 45.125,
"blob_id": "d56cb5a1facad6bdcb14e04237e2a7c1f8556950",
"content_id": "535a6e2e241987c2419eac225e2753f00bc2c042",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 3395,
"license_type": "no_license",
"max_line_length": 96,
"num_lines": 72,
"path": "/PySynapse/resources/ui_designer/SynapseQt.py",
"repo_name": "sapphire008/Python",
"src_encoding": "UTF-8",
"text": "# -*- coding: utf-8 -*-\r\n\r\n# Form implementation generated from reading ui file 'SynapseQt.ui'\r\n#\r\n# Created: Sat Apr 18 21:40:21 2015\r\n# by: PyQt4 UI code generator 4.10.4\r\n#\r\n# WARNING! All changes made in this file will be lost!\r\n\r\nfrom PyQt4 import QtCore, QtGui\r\n\r\ntry:\r\n _fromUtf8 = QtCore.QString.fromUtf8\r\nexcept AttributeError:\r\n def _fromUtf8(s):\r\n return s\r\n\r\ntry:\r\n _encoding = QtGui.QApplication.UnicodeUTF8\r\n def _translate(context, text, disambig):\r\n return QtGui.QApplication.translate(context, text, disambig, _encoding)\r\nexcept AttributeError:\r\n def _translate(context, text, disambig):\r\n return QtGui.QApplication.translate(context, text, disambig)\r\n\r\nclass Ui_MainWindow(object):\r\n def setupUi(self, MainWindow):\r\n MainWindow.setObjectName(_fromUtf8(\"MainWindow\"))\r\n MainWindow.resize(638, 275)\r\n self.centralwidget = QtGui.QWidget(MainWindow)\r\n self.centralwidget.setObjectName(_fromUtf8(\"centralwidget\"))\r\n self.horizontalLayout = QtGui.QHBoxLayout(self.centralwidget)\r\n self.horizontalLayout.setObjectName(_fromUtf8(\"horizontalLayout\"))\r\n self.splitter = QtGui.QSplitter(self.centralwidget)\r\n sizePolicy = QtGui.QSizePolicy(QtGui.QSizePolicy.Expanding, QtGui.QSizePolicy.Preferred)\r\n sizePolicy.setHorizontalStretch(0)\r\n sizePolicy.setVerticalStretch(0)\r\n sizePolicy.setHeightForWidth(self.splitter.sizePolicy().hasHeightForWidth())\r\n self.splitter.setSizePolicy(sizePolicy)\r\n self.splitter.setOrientation(QtCore.Qt.Horizontal)\r\n self.splitter.setObjectName(_fromUtf8(\"splitter\"))\r\n self.treeview = QtGui.QTreeView(self.splitter)\r\n sizePolicy = QtGui.QSizePolicy(QtGui.QSizePolicy.Preferred, QtGui.QSizePolicy.Expanding)\r\n sizePolicy.setHorizontalStretch(1)\r\n sizePolicy.setVerticalStretch(0)\r\n sizePolicy.setHeightForWidth(self.treeview.sizePolicy().hasHeightForWidth())\r\n self.treeview.setSizePolicy(sizePolicy)\r\n self.treeview.setSizeAdjustPolicy(QtGui.QAbstractScrollArea.AdjustToContents)\r\n self.treeview.setTextElideMode(QtCore.Qt.ElideNone)\r\n self.treeview.setObjectName(_fromUtf8(\"treeview\"))\r\n self.tableview = QtGui.QTableView(self.splitter)\r\n sizePolicy = QtGui.QSizePolicy(QtGui.QSizePolicy.Expanding, QtGui.QSizePolicy.Expanding)\r\n sizePolicy.setHorizontalStretch(3)\r\n sizePolicy.setVerticalStretch(0)\r\n sizePolicy.setHeightForWidth(self.tableview.sizePolicy().hasHeightForWidth())\r\n self.tableview.setSizePolicy(sizePolicy)\r\n self.tableview.setObjectName(_fromUtf8(\"tableview\"))\r\n self.horizontalLayout.addWidget(self.splitter)\r\n MainWindow.setCentralWidget(self.centralwidget)\r\n self.menubar = QtGui.QMenuBar(MainWindow)\r\n self.menubar.setGeometry(QtCore.QRect(0, 0, 638, 21))\r\n self.menubar.setObjectName(_fromUtf8(\"menubar\"))\r\n MainWindow.setMenuBar(self.menubar)\r\n self.statusbar = QtGui.QStatusBar(MainWindow)\r\n self.statusbar.setObjectName(_fromUtf8(\"statusbar\"))\r\n MainWindow.setStatusBar(self.statusbar)\r\n\r\n self.retranslateUi(MainWindow)\r\n QtCore.QMetaObject.connectSlotsByName(MainWindow)\r\n\r\n def retranslateUi(self, MainWindow):\r\n MainWindow.setWindowTitle(_translate(\"MainWindow\", \"MainWindow\", None))\r\n\r\n"
},
{
"alpha_fraction": 0.5306532382965088,
"alphanum_fraction": 0.5623115301132202,
"avg_line_length": 36.11538314819336,
"blob_id": "80f40e162594ce39d51db714f6b158d1146c1f46",
"content_id": "477aba527a098e997aba8e36b3476c25fad502e5",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 1990,
"license_type": "no_license",
"max_line_length": 90,
"num_lines": 52,
"path": "/PySynapse/app/ColorComboBox.py",
"repo_name": "sapphire008/Python",
"src_encoding": "UTF-8",
"text": "# -*- coding: utf-8 -*-\r\n\"\"\"\r\nCreated on Thu May 10 11:48:23 2018\r\n\r\nColor drop-down combobox\r\n\r\n@author: Edward\r\n\r\n\"\"\"\r\nimport numpy as np\r\nfrom PyQt5 import QtGui, QtCore, QtWidgets\r\nfrom pdb import set_trace\r\n\r\nclass ColorDropDownCombobox(QtWidgets.QComboBox):\r\n def __init__(self, parent=None,\r\n colors=('#1f77b4', '#ff7f0e', '#2ca02c', '#d62728',\r\n '#9467bd', '#8c564b', '#e377c2', '#7f7f7f',\r\n '#bcbd22', '#17becf'), default=None):\r\n super(ColorDropDownCombobox, self).__init__(parent)\r\n self.colors = list(colors)\r\n self.setColors(self.colors)\r\n self.setEditable(True)\r\n self.lineEdit().setMaxLength(45)\r\n if default is not None:\r\n self.lineEdit().setText(default)\r\n\r\n def setColors(self, colors=None):\r\n if colors is None: return\r\n if isinstance(colors, (tuple, list, np.ndarray)):\r\n self.colors = list(colors)\r\n for n, c in enumerate(self.colors):\r\n myqcolor = self.parseColor(c)\r\n self.insertItem(n, c)\r\n self.setItemData(n, myqcolor, role=QtCore.Qt.DecorationRole)\r\n\r\n def setColorAt(self, color=None, index=None):\r\n if color is not None and index is not None:\r\n self.colors[index] = color\r\n self.setItemData(index, self.parseColor(color), role=QtCore.Qt.DecorationRole)\r\n\r\n @staticmethod\r\n def parseColor(c):\r\n if isinstance(c, str):\r\n if c[0] == '#': # hex to rgb\r\n c = tuple(int(c.lstrip('#')[i:i + 2], 16) for i in (0, 2, 4))\r\n return QtGui.QColor(*c, alpha=255)\r\n elif QtGui.QColor.isValidColor(c): # test if it is valid QColor\r\n return QtGui.QColor(c)\r\n elif isinstance(c, (tuple, list, np.ndarray)) and len(c) == 3: # rgb\r\n return QtGui.QColor(*c, alpha=255)\r\n else:\r\n raise (TypeError(\"Unrecognized type of 'colors' input\"))\r\n\r\n\r\n\r\n\r\n"
},
{
"alpha_fraction": 0.5356289148330688,
"alphanum_fraction": 0.5686274766921997,
"avg_line_length": 42.279571533203125,
"blob_id": "50394092ecc1e4152cb51e7b0d68d9c4e64eed5b",
"content_id": "53cffd298a5dcebc52fcaff2c24fb91cd2540722",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 8364,
"license_type": "no_license",
"max_line_length": 209,
"num_lines": 186,
"path": "/Plots/simple/ExportTraces.py",
"repo_name": "sapphire008/Python",
"src_encoding": "UTF-8",
"text": "# -*- coding: utf-8 -*-\r\n\"\"\"\r\nCreated on Thu Oct 15 17:56:32 2015\r\n\r\n@author: Edward\r\n\"\"\"\r\n# -*- coding: utf-8 -*-\r\n\"\"\"\r\nCreated on Thu Oct 15 11:16:23 2015\r\n\r\n@author: Edward\r\n\"\"\"\r\n\r\nimport sys\r\nsys.path.append('D:/Edward/Documents/Assignments/Scripts/Python/Plots')\r\nimport numpy as np\r\nimport matplotlib.pyplot as plt\r\nfrom PublicationFigures import PublicationFigures as PF\r\n\r\ncolor = ['#1f77b4','#ff7f0e', '#2ca02c','#d62728','#9467bd','#8c564b','#e377c2','#7f7f7f','#bcbd154','#17becf'] # tableau10, or odd of tableau20\r\n\r\ndef SingleEpisodeTraces(base_dir, result_dir, eps=None, channels=['A'], \r\n streams=['Volt','Cur']):\r\n \"\"\"Helper function to export traces from a single episode\"\"\"\r\n if eps is None:\r\n return\r\n # Load data\r\n data = [base_dir %(epi) for epi in eps]\r\n K = PF(dataFile=data,savePath=result_dir, old=True, channels=channels, streams=streams)\r\n\r\n \r\n # Arrange all plots vertcially\r\n fig, ax = plt.subplots(nrows=len(channels)*len(streams), ncols=1, sharex=True)\r\n pcount = 0\r\n yunit_dict = {'Volt':'mV','Cur':'pA','Stim':'pA'}\r\n\r\n for c in channels: # iterate over channels\r\n for s in streams: # iterate over streams\r\n ax[pcount].plot(K.data.table['time'], K.data.table[s+c], label=pcount, c='k')\r\n K.AddTraceScaleBar(xunit='ms', yunit=yunit_dict[s],ax=ax[pcount])\r\n dataposition = [K.data.table['time'][0], K.data.table[s+c][0]]\r\n datatext = '%.0f'%(dataposition[1]) + yunit_dict[s]\r\n K.TextAnnotation(text=datatext, position=dataposition, ax=ax[pcount], color='k',\r\n xoffset='-', yoffset=None, fontsize=None,ha='right',va='center')\r\n pcount += 1\r\n\r\n # Finally annotate the episode information at the bottom\r\n # fig.suptitle(K.data.meta['notes'][0])\r\n pad = np.array(ax[-1].get_position().bounds[:2]) * np.array([1.0, 0.8])\r\n fig.text(pad[0], pad[1], K.data.meta['notes'][0], ha='left',va='bottom')\r\n K.SetFont(ax=ax, fig=fig)\r\n \r\n fig.set_size_inches(10,5)\r\n #K.Save()\r\n return(K, ax, fig)\r\n \r\n \r\ndef MultipleTraces(base_dir, result_dir, eps=None, channel='A', stream='Volt', color=color, window=None):\r\n \"\"\"Helper function to draw multiple traces in a single axis\"\"\"\r\n if eps is None:\r\n return\r\n # load data\r\n data = [base_dir %(epi) for epi in eps]\r\n nep = len(eps)\r\n K = PF(dataFile=data, savePath=result_dir, old=True, channels=[channel], streams=[stream])\r\n \r\n # Initialize axis\r\n fig, ax = plt.subplots(nrows=1, ncols=1)\r\n yunit_dict = {'Volt':'mV','Cur':'pA','Stim':'pA'}\r\n # Draw plots\r\n for n in range(nep):\r\n x, y = K.data.table[n]['time'], K.data.table[n][stream+channel]\r\n ts = x[1] - x[0]\r\n if window is not None:\r\n x, y = x[int(window[0]/ts) : int(window[1]/ts)], y[int(window[0]/ts) : int(window[1]/ts)]\r\n \r\n ax.plot(x, y, label=n, c=color[n%nep])\r\n if n == (nep-1): # add trace bar for the last episode\r\n K.AddTraceScaleBar(xunit='ms', yunit=yunit_dict[stream], ax=ax)\r\n if n == 0: # annotate the first episode\r\n dataposition = [np.array(x)[0], np.array(y)[0]]\r\n datatext = '%.0f'%(dataposition[1]) + yunit_dict[stream]\r\n K.TextAnnotation(text=datatext, position=dataposition, ax=ax, color='k',\r\n xoffset='-', yoffset=None, fontsize=None,ha='right',va='center')\r\n \r\n # update the graph \r\n fig.canvas.draw() \r\n # Finally, annotate the episode information at the bottom\r\n pad = np.array(ax.get_position().bounds[:2]) * np.array([1.0, 0.8])\r\n #fontsize = ax.yaxis.get_major_ticks()[2].label.get_fontsize()\r\n inc = 0.025 \r\n #inc = K.xydotsize(ax, s=fontsize,scale=(1.,1.))[1]\r\n #print(inc)\r\n #inc = inc/(ax.get_ybound()[1] - ax.get_ybound()[0])*(ax.get_position().bounds[3]-ax.get_position().bounds[1])\r\n #print(inc)\r\n\r\n for n, _ in enumerate(eps):\r\n # print(pad[0], pad[1]+inc*n)\r\n fig.text(pad[0], pad[1]-inc*n, K.data.meta['notes'][n], ha='left',va='bottom', color=color[n%len(color)])\r\n \r\n K.SetFont(ax=ax, fig=fig)\r\n fig.set_size_inches(6,4)\r\n return(K, ax, fig)\r\n \r\ndef ConcatenatedTraces(base_dir, result_dir, eps=None, channel='A', stream='Volt', gap=0.05, color='k'):\r\n \"\"\"Heper function to export horizontally concatenated traces\r\n gap: gap between consecutive plots. gap * duration of plot. Default is 0.05,\r\n or 5% of the duration of the plot.\r\n \"\"\"\r\n if eps is None:\r\n return\r\n # load data\r\n data = [base_dir %(epi) for epi in eps]\r\n nep = len(eps)\r\n K = PF(dataFile=data, savePath=result_dir, old=True, channels=[channel], streams=[stream])\r\n \r\n # Initialize axis\r\n fig, ax = plt.subplots(nrows=1, ncols=1)\r\n yunit_dict = {'Volt':'mV','Cur':'pA','Stim':'pA'}\r\n \r\n gap *= max([x['time'].iloc[-1] - x['time'].iloc[0] \r\n for x in K.data.table])\r\n\r\n # initialize the time\r\n x0 = 0.0 \r\n # Draw plots\r\n for n in range(nep):\r\n x, y = K.data.table[n]['time'], K.data.table[n][stream+channel]\r\n # update time shift\r\n x = x + x0\r\n x0 = x.iloc[-1] + gap\r\n \r\n ax.plot(x, y, label=n, c=color)\r\n if n == (nep-1): # add trace bar for the last episode\r\n K.AddTraceScaleBar(xunit='ms', yunit=yunit_dict[stream], ax=ax, \r\n xscale=x.iloc[-1]-x.iloc[0])\r\n if n == 0: # annotate the first episode\r\n dataposition = [np.array(x)[0], np.array(y)[0]]\r\n datatext = '%.0f'%(dataposition[1]) + yunit_dict[stream]\r\n K.TextAnnotation(text=datatext, position=dataposition, ax=ax, color='k',\r\n xoffset='-', yoffset=None, fontsize=None,ha='right',va='center')\r\n \r\n # update the graph \r\n fig.canvas.draw() \r\n # Finally, annotate the episode information at the bottom\r\n pad = np.array(ax.get_position().bounds[:2]) * np.array([1.0, 0.8])\r\n #fontsize = ax.yaxis.get_major_ticks()[2].label.get_fontsize()\r\n inc = 0.025 \r\n #inc = K.xydotsize(ax, s=fontsize,scale=(1.,1.))[1]\r\n #print(inc)\r\n #inc = inc/(ax.get_ybound()[1] - ax.get_ybound()[0])*(ax.get_position().bounds[3]-ax.get_position().bounds[1])\r\n #print(inc)\r\n\r\n for n, _ in enumerate(eps):\r\n # print(pad[0], pad[1]+inc*n)\r\n fig.text(pad[0], pad[1]-inc*n, K.data.meta['notes'][n], ha='left',va='bottom', color=color)\r\n \r\n K.SetFont(ax=ax, fig=fig)\r\n fig.set_size_inches(50,4)\r\n return(K, ax, fig)\r\n \r\n \r\nif __name__ == '__main__':\r\n #base_dir = 'D:/Data/2015/07.July/Data 10 Jul 2015/Neocortex K.10Jul15.S1.E%d.dat'\r\n #result_dir = 'C:/Users/Edward/Documents/Assignments/Case Western Reserve/StrowbridgeLab/Projects/TeA Persistence Cui and Strowbridge 2015/analysis/ADP under Pirenzepine - 07152015/example.svg'\r\n #eps = [38]\r\n #channels=['A']\r\n #streams=['Volt','Cur']\r\n #K, ax, fig = SingleEpisodeTraces(base_dir, result_dir, eps=eps)\r\n ####################################### \r\n #base_dir = 'D:/Data/Traces/2015/10.October/Data 21 Oct 2015/Neocortex C.21Oct15.S1.E%d.dat'\r\n #result_dir = 'C:/Users/Edward/Documents/Assignments/Case Western Reserve/StrowbridgeLab/Projects/TeA Persistence Cui and Strowbridge 2015/analysis/Self termination with stimulation - 10222015/example.eps'\r\n #result_dir = 'C:/Users/Edward/Desktop/asdf.svg'\r\n #eps = range(53, 58, 1)\r\n #channel = 'A'\r\n #stream = 'Volt'\r\n #K, ax, fig = MultipleTraces(base_dir, result_dir, eps=eps, channel=channel, stream=stream, window=[2000, 4000])\r\n #fig.savefig(result_dir, bbox_inches='tight', dpi=300)\r\n ########################################\r\n base_dir = 'D:/Data/Traces/2015/06.June/Data 17 Jun 2015/Neocortex H.17Jun15.S1.E%d.dat'\r\n eps = np.arange(150, 160, 1)\r\n result_dir = 'C:/Users/Edward/Desktop/concatplot.svg'\r\n channel = 'A'\r\n stream = 'Volt'\r\n K, ax, fig = ConcatenatedTraces(base_dir, result_dir, eps=eps, channel=channel, stream=stream, gap=0.05)\r\n fig.savefig(result_dir, bbox_inches='tight', dpi=300)\r\n \r\n \r\n\r\n \r\n \r\n \r\n \r\n \r\n \r\n \r\n \r\n \r\n \r\n \r\n \r\n \r\n \r\n \r\n \r\n \r\n \r\n \r\n"
},
{
"alpha_fraction": 0.5257879495620728,
"alphanum_fraction": 0.5576249361038208,
"avg_line_length": 37.484275817871094,
"blob_id": "af3e403409cd897eff261e02c8fc8f9872e6dd8f",
"content_id": "44c437a76dc489d334404ace83e21332e276fe1e",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 6282,
"license_type": "no_license",
"max_line_length": 150,
"num_lines": 159,
"path": "/Plots/archive/geodata.py",
"repo_name": "sapphire008/Python",
"src_encoding": "UTF-8",
"text": "# -*- coding: utf-8 -*-\r\n\"\"\"\r\nCreated on Tue Jul 24 16:55:48 2018\r\n\r\nConvert geojson to simple json dict\r\n\r\n@author: Edward\r\n\"\"\"\r\n\r\n\r\nimport json\r\n# %% plot map\r\ndef plot_state_map(states, P_AK=[-77, 5, -0.22, 0.25, 0.35], P_HI=[-38, 17, 0, 0.4, 0.4], exclude_states=['','PR', 'VI', 'DC', 'GU', 'MP', 'AS']):\r\n xs, ys = states['AK']['lons'],states['AK']['lats']\r\n # transoformation\r\n X = np.array([xs, ys, list(np.ones(len(xs)))])\r\n A = spm_matrix_2d(P=P_AK)\r\n Y = A @ X\r\n states['AK']['lons'] = list(Y[0,:])\r\n states['AK']['lats'] = list(Y[1,:])\r\n \r\n xs, ys = states['HI']['lons'],states['HI']['lats']\r\n X = np.array([xs, ys, list(np.ones(len(xs)))])\r\n A = spm_matrix_2d(P=P_HI)\r\n Y = A @ X\r\n states['HI']['lons'] = list(Y[0,:])\r\n states['HI']['lats'] = list(Y[1,:])\r\n \r\n # plot transformation\r\n fig, ax = plt.subplots(1,1)\r\n fig.set_size_inches(7, 4.5)\r\n for s in states.keys():\r\n if s in exclude_states: continue\r\n xs, ys = states[s]['lons'],states[s]['lats']\r\n ax.plot(xs, ys)\r\n return states\r\n\r\n# %% states\r\n#fs = '5m'\r\n#file_dir = 'D:/Edward/Documents/Assignments/Scripts/Python/Plots/resource/cb_2017_us_state_%s.geojson'%fs\r\n#with open(file_dir, 'r') as data_file:\r\n# state_map = json.load(data_file)\r\n# \r\n#state_map = state_map['features']\r\n#states = {}\r\n#for sm in state_map:\r\n# name = sm['properties']['NAME']\r\n# key = sm['properties']['STUSPS']\r\n# state_id = sm['properties']['STATEFP']\r\n# coords = sm['geometry']['coordinates']\r\n# lons_lats = np.empty((0, 2))\r\n# # Gather the lons and lats\r\n# for n, c in enumerate(coords):\r\n# current_coords = np.array(c).squeeze()\r\n# if key == 'AK' and any(current_coords[:, 0]>0):\r\n# current_coords[:, 0] = -current_coords[:, 0]\r\n# lons_lats = np.concatenate((lons_lats, current_coords), axis=0)\r\n# if n < len(coords)-1:\r\n# lons_lats = np.concatenate((lons_lats, np.array([[np.nan, np.nan]])), axis=0)\r\n# \r\n# lons = list(lons_lats[:, 0])\r\n# lats = list(lons_lats[:, 1])\r\n# states[key] = {'name':name, 'id':state_id, 'lons':lons, 'lats':lats}\r\n#\r\n##plot_state_map(states, P_AK)\r\n#if fs == '500k':\r\n# states_shifted = plot_state_map(states, P_AK=[-70, 9, 0, 0.30, 0.30], P_HI=[-4, 12, 0,0.6, 0.6]) # for 500k resolution only\r\n#else:\r\n# states_shifted = plot_state_map(states, P_AK=[-70, 9, 0, 0.30, 0.30], P_HI=[51, 7, 0,])\r\n## write file\r\n#file_dir = 'D:/Edward/Documents/Assignments/Scripts/Python/Plots/resource/CBUS_states_scaled_%s.json'%fs\r\n#with open(file_dir,'w') as outfile:\r\n# json.dump(states_shifted, outfile)\r\n\r\n# %% \r\ndef plot_county_map(counties, P_AK=[-77, 5, -0.22, 0.25, 0.35], P_HI=[-38, 17, 0, 0.4, 0.4], exclude_states=['', 'PR', 'VI', 'DC', 'GU', 'MP', 'AS']):\r\n for key in counties.keys():\r\n if key[0] == 2 or key[0] == 15: # Alaska or Hawaii\r\n xs = counties[key]['lons']\r\n ys = counties[key]['lats']\r\n X = np.array([xs, ys, list(np.ones(len(xs)))])\r\n A = spm_matrix_2d(P=P_HI if key[0]==15 else P_AK)\r\n Y = A @ X\r\n counties[key]['lons'] = list(Y[0,:])\r\n counties[key]['lats'] = list(Y[1,:])\r\n else:\r\n pass\r\n \r\n # plot transformation\r\n fig, ax = plt.subplots(1,1)\r\n fig.set_size_inches(7, 4.5)\r\n for c in counties.keys():\r\n #print(counties[c]['state_key'])\r\n if counties[c]['state_key'] in exclude_states: continue\r\n xs, ys = counties[c]['lons'],counties[c]['lats']\r\n ax.plot(xs, ys)\r\n return counties\r\n\r\n# %% Counties\r\nfs = '20m'\r\nstate_dir = 'D:/Edward/Documents/Assignments/Scripts/Python/Plots/resource/CBUS_states_scaled_20m.json'\r\ncounty_dir = 'D:/Edward/Documents/Assignments/Scripts/Python/Plots/resource/cb_2017_us_county_%s.geojson'%fs\r\nwith open(state_dir, 'r') as data_file:\r\n states = json.load(data_file)\r\n\r\nwith open(county_dir, 'r') as data_file:\r\n county_map = json.load(data_file)\r\n\r\ncounties = {}\r\ncounties_ids = []\r\ncounty_map = county_map['features']\r\nfor cm in county_map:\r\n name = cm['properties']['NAME']\r\n county_id = cm['properties']['COUNTYFP']\r\n state_id = cm['properties']['STATEFP']\r\n state_name, state_key = '', ''\r\n for kk in states:\r\n if states[kk]['id'] == state_id:\r\n state_name = states[kk]['name']\r\n state_key = kk\r\n break\r\n \r\n coords = cm['geometry']['coordinates']\r\n lons_lats = np.empty((0, 2))\r\n # Gather the lons and lats\r\n for n, c in enumerate(coords):\r\n current_coords = np.array(c).squeeze()\r\n if current_coords.ndim<2:\r\n current_coords = np.empty((0,2))\r\n for m, cc in enumerate(c):\r\n current_coords = np.concatenate((current_coords, cc), axis=0)\r\n if m < len(c)-1:\r\n current_coords = np.concatenate((current_coords, np.array([[np.nan, np.nan]])), axis=0)\r\n if state_key == 'AK' and any(current_coords[:, 0]>0):\r\n current_coords[:, 0] = -current_coords[:, 0]\r\n lons_lats = np.concatenate((lons_lats, current_coords), axis=0)\r\n if n < len(coords)-1:\r\n lons_lats = np.concatenate((lons_lats, np.array([[np.nan, np.nan]])), axis=0)\r\n\r\n lons = list(lons_lats[:, 0])\r\n lats = list(lons_lats[:, 1])\r\n counties[(int(state_id), int(county_id))] = {'name':name, 'id': county_id, 'state': state_name, \r\n 'state_key': state_key, 'lons':lons, 'lats':lats}\r\nif fs == '500k':\r\n counties_scaled = plot_county_map(counties, P_AK=[-70, 9, 0, 0.30, 0.30], P_HI=[-4, 12, 0,0.6, 0.6]) # for 500k resolution only\r\nelse:\r\n counties_scaled = plot_county_map(counties, P_AK=[-70, 9, 0, 0.30, 0.30], P_HI=[51, 7, 0,])\r\n \r\n# write file\r\nfile_dir = 'D:/Edward/Documents/Assignments/Scripts/Python/Plots/resource/CBUS_counties_scaled_%s.json'%fs\r\ndef remap_keys(mapping, func=str): #json can't take tuple keys, only string keys\r\n return {func(k):v for k,v in mapping.items()}\r\n\r\nwith open(file_dir,'w') as outfile:\r\n json.dump(remap_keys(counties_scaled), outfile)\r\n \r\n# %%\r\nwith open(file_dir, 'r') as outfile:\r\n counties_loaded = remap_keys(json.load(outfile), func=lambda x: tuple(str2num(x)))\r\n "
},
{
"alpha_fraction": 0.6645068526268005,
"alphanum_fraction": 0.6976242065429688,
"avg_line_length": 51.346153259277344,
"blob_id": "1ecab5c5b1204afd33be1c446976fcdc2191ba7f",
"content_id": "e9d0292c263f4930a9639985fe922a6434bd332e",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 1389,
"license_type": "no_license",
"max_line_length": 286,
"num_lines": 26,
"path": "/image_processing/batch_cdxml2svg.py",
"repo_name": "sapphire008/Python",
"src_encoding": "UTF-8",
"text": "# -*- coding: utf-8 -*-\r\n\"\"\"\r\nCreated on Sat Aug 08 05:08:22 2015\r\n\r\nBatch convert cdxml to svg file\r\n\r\n@author: Edward\r\n\"\"\"\r\nimport subprocess, glob, os\r\nsource_dir = 'C:/ProgramData/CambridgeSoft/ChemOffice2015/ChemDraw/ChemDraw Items/BioDrawResources'\r\ntarget_dir = 'C:/Users/Edward/Desktop/save'\r\n#'\"C:/Program Files (x86)/CambridgeSoft/ChemOffice2015/ChemDraw/ChemDrawSVG/SVGConverter.exe\" -i\"C:/Users/Edward/Desktop/save/GS_Amoeba.cdxml\" -o\"C:/Users/Edward/Desktop/save/GS_Amoeba.svg\" -c\"C:/Program Files (x86)/CambridgeSoft/ChemOffice2015/ChemDraw/ChemDrawSVG/configuration.xml\"'\r\ncmd_str = '\"C:/Program Files (x86)/CambridgeSoft/ChemOffice2015/ChemDraw/ChemDrawSVG/SVGConverter.exe\" -i\"%s\" -o\"%s\" -c\"C:/Program Files (x86)/CambridgeSoft/ChemOffice2015/ChemDraw/ChemDrawSVG/configuration.xml\"'\r\n\r\nsource_img_list = glob.glob(os.path.join(source_dir,'*.cdxml'))\r\nfid = open(os.path.join(target_dir, 'cdxml2svg.bat'),'w')\r\nfor source_img in source_img_list:\r\n source_img = source_img.replace('\\\\','/')\r\n target_img = os.path.basename(source_img).replace('.cdxml','.svg')\r\n target_img = os.path.join(target_dir, target_img).replace('\\\\','/')\r\n #exe_cmd = [cmd_str[0], cmd_str[1]%(source_img), cmd_str[2]%(target_img), cmd_str[3]]\r\n #subprocess.call(exe_cmd)\r\n exe_cmd = cmd_str%(source_img, target_img)\r\n fid.write(exe_cmd)\r\n fid.write('\\r\\n')\r\nfid.close()\r\n\r\n"
},
{
"alpha_fraction": 0.7688679099082947,
"alphanum_fraction": 0.78125,
"avg_line_length": 139.3333282470703,
"blob_id": "7652bdf1f88b210356b1348b78f1e7bf0f8eb25b",
"content_id": "e4cad827fc3707dab44c68cf138c63a6db929451",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Markdown",
"length_bytes": 1702,
"license_type": "no_license",
"max_line_length": 463,
"num_lines": 12,
"path": "/PySynapse/archive/using pyqt5 with spyder.md",
"repo_name": "sapphire008/Python",
"src_encoding": "UTF-8",
"text": "Using PyQt 5 with Spyder.\r\nI have a toy project where I want to go the whole scripted/interpreted way with Qt. For that I use this toolchain: Qt 5.1 with QtQuick 2.0, PyQt 5 and Python 3.3. Lately my Python-Editor of choice, Spyder, does support Python 3.3, but it is based on PyQt 4 and a release of Qt 4. That does not fit my bill, as I need some new 5.0 stuff from the QMultimedia module. (If you need just Qt 4 functionality, that toolchain works like a charm and you can stop reading.)\r\n\r\nJust downloading a binary release of PyQt 5 and installing it, fails as it detects the PyQt 4, which needs to be uninstalled first, but doing this kills Spyder. Bummer.\r\nDownloading the source and building it (which is actually very straightforward, just follow the instructions in the readme - don't forget to do (n)make install and update the Python module name list (in Spyder)), does the trick. Now both PyQt releases are available.\r\nBut importing QtCore from PyQt 5 causes a runtime crash:\r\nRuntimeError: the PyQt4.QtCore and PyQt5.QtCore modules both wrap the QObject class\r\nObviously PyQt 4 is imported, too. A quick\r\nimport sys\r\nsys.modules.keys()\r\nconfirms that. The reason for that is that Spyder installs a special input hook, replacing the input hook of PyQt, which is said to cause problems with Spyder. Fortunately you can disable this hook under Tools > Preferences > Console > External modules. After a restart of the current console, you can now import QtCore and you can start developing your stuff.\r\nThere is a slight annoying thing regarding the hook though: you now can not interactively work with your application in the console. The solution for that comes in the next post.\r\n"
},
{
"alpha_fraction": 0.6277673840522766,
"alphanum_fraction": 0.6386491656303406,
"avg_line_length": 44.98214340209961,
"blob_id": "c7bc097790e1c46ac8f602681e41e535337414fb",
"content_id": "67c2a2d4da1b20555a56c36c6a5cf282a84971f5",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 2665,
"license_type": "no_license",
"max_line_length": 132,
"num_lines": 56,
"path": "/image_processing/ImageSegmentation.py",
"repo_name": "sapphire008/Python",
"src_encoding": "UTF-8",
"text": "# -*- coding: utf-8 -*-\r\n\"\"\"\r\nCreated on Sun Jun 22 00:22:31 2014\r\n\r\n@author: Edward\r\n\"\"\"\r\n\r\n# Segmenting the picture in regions\r\nimport numpy as np\r\nimport pylab as pl\r\nfrom scipy.ndimage.interpolation import zoom\r\nfrom sklearn.feature_extraction import image\r\nfrom sklearn.cluster import spectral_clustering\r\n\r\n\r\ndef SpectralClusterImage(input_image, beta=5, eps=1e-6, n_regions=11, assign_labels='discretize',downsample_factor=np.NaN, order=3):\r\n \"\"\" Spectral Cluster an image\r\n Inputs:\r\n input_image: ndarray of image\r\n beta: Take a decreasing function of the gradient: an exponential\r\n The smaller beta is, the more independent the segmentation is of \r\n the acutal image. For beta=1, the segmentation is close to a \r\n voronoi. Default is 5.\r\n eps: error term. Default is 1E-6\r\n n_regions: number of regions to decompose into. Default is 11.\r\n assign_labels: ways of decomposition. Selecting from 'discretize' and \r\n 'kmeans'. Default is 'discretize'.\r\n downsample_factor: downsampling before spectral decomposition. Default\r\n is to keep the original sampling. Enter a single number to apply\r\n the kernel for both dimensions of the image, or enter as a sequence\r\n to apply different kernel for each dimension\r\n order: downsampling method, order of B-spline interpolation\r\n \"\"\"\r\n # Downsample the image\r\n if not np.isnan(downsample_factor):\r\n zoom(input_image, zoom=downsample_factor, order=order)\r\n # Convert the image into a graph with the value of the gradient on the edges\r\n graph = image.img_to_graph(input_image)\r\n # Take a decreasing function of the gradient: an exponential\r\n # The smaller beta is, the more independent the segmentation is of the\r\n # acutal image. For beta=1, the segmentation is close to a voronoi\r\n graph.data = np.exp(-beta * graph.data / input_image.std()) + eps \r\n # Apply spectral clustering (this step goes much faster if yuo have pyamg \r\n # installed) \r\n labels = spectral_clustering(graph, n_clusters=n_regions,\r\n assign_labels='discretize')\r\n labels = labels.reshape(input_image.shape)\r\n # Visualizing the resulting regions\r\n pl.figure(figsize=(5,5))\r\n pl.imshow(input_image, cmap=pl.cm.gray)\r\n for lb in range(n_regions):\r\n pl.contour(labels == lb, contour=1,\r\n color=[pl.cm.spectral(lb / float(n_regions)), ])\r\n # Get rid of x, y tick marks\r\n pl.xticks(())\r\n pl.yticks(())\r\n\r\n\r\n\r\n "
},
{
"alpha_fraction": 0.5866441130638123,
"alphanum_fraction": 0.595604419708252,
"avg_line_length": 36.91447448730469,
"blob_id": "e2a2bcc535916edbf8cdcfb3d8681015ea84c22e",
"content_id": "314ee3254e6caf815f9e5c70c95e3dcc4305dd39",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 5915,
"license_type": "no_license",
"max_line_length": 125,
"num_lines": 152,
"path": "/PySynapse/util/svg2eps.py",
"repo_name": "sapphire008/Python",
"src_encoding": "UTF-8",
"text": "# -*- coding: utf-8 -*-\r\n\"\"\"\r\nCreated on Thu Nov 3 18:44:36 2016\r\n\r\nCall Adobe Illustrator to convert .svg to .eps\r\n\r\n@author: Edward\r\n\"\"\"\r\nimport os\r\nimport sys\r\nimport signal\r\nimport subprocess\r\nimport time\r\nfrom pdb import set_trace\r\n\r\njsx_file_str_AI_CS6 = \"\"\"\r\nfunction exportFigures_AI_CS6(sourceFile, targetFile, exportType, ExportOpts) {\r\n if (sourceFile){ // if not an empty string\r\n var fileRef = new File(sourceFile)\r\n var sourceDoc = app.open(fileRef); // returns the document object\r\n } else { // for empty string, use current active document\r\n sourceDoc = app.activeDocument();\r\n }\r\n var newFile = new File(targetFile) // newly saved file\r\n\r\n switch(exportType){\r\n case 'png':\r\n if (ExportOpts == null) {\r\n var ExportOpts = new ExportOptionsPNG24()\r\n ExportOpts.antiAliasing = true;\r\n ExportOpts.transparency = true;\r\n ExportOpts.saveAsHTML = true;\r\n }\r\n // Export as PNG\r\n sourceDoc.exportFile(newFile, ExportType.PNG24, ExportOpts);\r\n case 'tiff':\r\n if (ExportOpts == null) {\r\n var ExportOpts = new ExportOptionsTIFF();\r\n ExportOpts.resolution = 600;\r\n ExportOpts.byteOrder = TIFFByteOrder.IBMPC;\r\n ExportOpts.IZWCompression = false;\r\n ExportOpts.antiAliasing = true\r\n }\r\n sourceDoc.exportFile(newFile, ExportType.TIFF, ExportOpts);\r\n case 'svg':\r\n if (ExportOpts == null) {\r\n var ExportOpts = new ExportOptionsSVG();\r\n ExportOpts.embedRasterImages = true;\r\n ExportOpts.embedAllFonts = true;\r\n ExportOpts.fontSubsetting = SVGFontSubsetting.GLYPHSUSED;\r\n }\r\n // Export as SVG\r\n sourceDoc.exportFile(newFile, ExportType.SVG, ExportOpts);\r\n case 'eps':\r\n if (ExportOpts == null) {\r\n var ExportOpts = new EPSSaveOptions();\r\n ExportOpts.cmykPostScript = true;\r\n ExportOpts.embedAllFonts = true;\r\n }\r\n // Export as EPS\r\n sourceDoc.saveAs(newFile, ExportOpts);\r\n }\r\n // Close the file after saving. Simply save another copy, do not overwrite\r\n sourceDoc.close(SaveOptions.DONOTSAVECHANGES);\r\n}\r\n\r\n// Use the function to convert the files\r\nexportFigures_AI_CS6(sourceFile=\"{format_source_file}\", targetFile=\"{format_target_file}\", exportType=\"eps\", ExportOpts=null)\r\n// exportFigures_AI_CS6(sourceFile=arguments[0], targetFile=arguments[1], exportType=arguments[2])\r\n\"\"\"\r\n\r\n\r\ndef svg2eps_ai(source_file, target_file, \\\r\n illustrator_path=\"D:/Edward/Software/Adobe Illustrator CS6/Support Files/Contents/Windows/Illustrator.exe\",\\\r\n jsx_file_str = jsx_file_str_AI_CS6, DEBUG=False):\r\n \"\"\"Use Adobe Illustrator to convert svg to eps\"\"\"\r\n # Change the strings\r\n jsx_file_str = jsx_file_str.replace('{format_source_file}', source_file)\r\n jsx_file_str = jsx_file_str.replace('{format_target_file}', target_file).replace('\\\\','/')\r\n tmp_f = os.path.join(os.path.dirname(target_file), \"tmp.jsx\")\r\n #set_trace()\r\n #print(tmp_f)\r\n tmp_osa = None\r\n f = open(tmp_f, 'w')\r\n f.write(jsx_file_str)\r\n f.close()\r\n\r\n # Remove previous target file if already existed\r\n if os.path.isfile(target_file):\r\n os.remove(target_file)\r\n\r\n running_os = sys.platform[:3].lower()\r\n if running_os == 'win':\r\n cmd = \" \".join(['\"'+illustrator_path+'\"', '-run', '\"'+tmp_f+'\"'])\r\n pro = subprocess.Popen(cmd, stdout=subprocess.PIPE)\r\n # continuously check if new files are updated\r\n time.sleep(5.0)\r\n sleep_iter = 5.0\r\n max_sleep_iter = 40\r\n while not os.path.isfile(target_file):\r\n time.sleep(1.0)\r\n sleep_iter = sleep_iter + 1.0\r\n if sleep_iter > max_sleep_iter:\r\n break\r\n pro.kill()\r\n elif running_os == 'dar': # mac\r\n applescript = '''tell application \"Adobe Illustrator\"\r\n activate\r\n do javascript \"#include {}\"\r\n quit\r\n end tell\r\n '''.format(tmp_f)\r\n args = [item for x in [(\"-e\", l.strip()) for l in applescript.split('\\n') if l.strip() != ''] for item in x]\r\n proc = subprocess.Popen([\"osascript\"] + args, stdout=subprocess.PIPE)\r\n progname = proc.stdout.read().strip()\r\n sys.stdout.write(str(progname))\r\n else:\r\n raise(Exception(\"Unrecognized system\"))\r\n\r\n os.remove(tmp_f)\r\n\r\ndef svg2eps_inkscape(source_file, target_file, \\\r\n inkscape_path='\"D:\\\\Edward\\\\Software\\\\inkscape-0.91-1-win64\\\\inkscape.exe\"'):\r\n \"\"\"Use inkscape to convert svg to eps\"\"\"\r\n # cmd = \"inkscape in.svg -E out.eps --export-ignore-filters --export-ps-level=3\"\r\n cmd = inkscape_path+\" \"+source_file+\" --export-eps=\"+target_file +\" --export-ignore-filters --export-ps-level=3\"\r\n print(cmd) # Problem: text was not kept as text, but converted into paths\r\n pro = subprocess.Popen(cmd, stdout=subprocess.PIPE)\r\n #subprocess.check_call([inkscape_path, source_file, '-E', target_file])\r\n print(pro.stdout)\r\n \r\n#def svg2eps_cloudconvert(source_file, target_file):\r\n# import cloudconvert\r\n# api = cloudconvert.Api('5PGyLT7eAn0yLbnBU3G-7j1JLFWTfcnFUk6x7k_lhuwzioGwqO7bVQ-lJNunsDkrr9fL1JDdjdVog6iDZ31yIw')\r\n# process = api.convert({\"input\": \"upload\",\r\n# \"file\": open('R:/temp.svg', 'rb'),\r\n# \"inputformat\": \"svg\",\r\n# \"outputformat\": \"eps\",\r\n# })\r\n# process.wait()\r\n# process.download()\r\n\r\n\r\n\r\n\r\nif __name__ == '__main__':\r\n source_file = '/Volumes/SD/temp.svg'\r\n target_file = '/Volumes/SD/temp.eps'\r\n illustrator_path=\"D:/Edward/Software/Adobe Illustrator CS6/Support Files/Contents/Windows/Illustrator.exe\"\r\n javascript_path=\"/Volumes/SD/tmp.jsx\"\r\n svg2eps_ai(source_file, target_file)\r\n # svg2eps_inkscape(source_file, target_file)\r\n"
},
{
"alpha_fraction": 0.5612064003944397,
"alphanum_fraction": 0.5708653926849365,
"avg_line_length": 31.519229888916016,
"blob_id": "5e48734946af3fd7f25082bbf8a3d27475eaf892",
"content_id": "3bae5938fb395744b5f99465e4b458b879510499",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 5073,
"license_type": "no_license",
"max_line_length": 82,
"num_lines": 156,
"path": "/Spikes/spikedetekt2/spikedetekt2/core/tests/test_main.py",
"repo_name": "sapphire008/Python",
"src_encoding": "UTF-8",
"text": "\"\"\"Main module tests.\"\"\"\n\n# -----------------------------------------------------------------------------\n# Imports\n# -----------------------------------------------------------------------------\nimport os\nimport os.path as op\nimport tempfile\nimport shutil\n\nimport numpy as np\nimport tables as tb\nfrom nose import with_setup\n\nfrom kwiklib.dataio import (BaseRawDataReader, read_raw, create_files,\n open_files, close_files, add_recording, add_cluster_group, add_cluster,\n get_filenames, Experiment, excerpts)\nfrom spikedetekt2.core import run\nfrom kwiklib.utils import itervalues, get_params, Probe, create_trace\n\n\n# -----------------------------------------------------------------------------\n# Fixtures\n# -----------------------------------------------------------------------------\nDIRPATH = tempfile.mkdtemp()\n\nsample_rate = 2000.\nduration = 1.\nnchannels = 8\nnsamples = int(sample_rate * duration)\nraw_data = .1 * np.random.randn(nsamples, nchannels)\n# Add \"spikes\".\nfor start, end in excerpts(nsamples, nexcerpts=100, excerpt_size=10):\n raw_data[start:end] *= 5\n\nprm = get_params(**{\n 'nchannels': nchannels,\n 'sample_rate': sample_rate,\n 'detect_spikes': 'positive',\n 'save_high': True,\n 'save_raw': True,\n})\nprb = {0:\n {\n 'channels': list(range(nchannels)),\n 'graph': [(i, i + 1) for i in range(nchannels - 1)],\n }\n}\n\ndef setup():\n create_files('myexperiment', dir=DIRPATH, prm=prm, prb=prb)\n\n # Open the files.\n files = open_files('myexperiment', dir=DIRPATH, mode='a')\n\n # Add data.\n add_recording(files,\n sample_rate=sample_rate,\n nchannels=nchannels)\n add_cluster_group(files, channel_group_id='0', id='0', name='Noise')\n add_cluster(files, channel_group_id='0',)\n\n # Close the files\n close_files(files)\n\ndef teardown():\n files = get_filenames('myexperiment', dir=DIRPATH)\n [os.remove(path) for path in itervalues(files)]\n\n\n# -----------------------------------------------------------------------------\n# Processing tests\n# -----------------------------------------------------------------------------\ndef test_run_nospikes():\n \"\"\"Read from NumPy array file.\"\"\"\n # Run the algorithm.\n with Experiment('myexperiment', dir=DIRPATH, mode='a') as exp:\n run(np.zeros((nsamples, nchannels)),\n experiment=exp, prm=prm, probe=Probe(prb))\n\n # Open the data files.\n with Experiment('myexperiment', dir=DIRPATH) as exp:\n assert len(exp.channel_groups[0].spikes) == 0\n\n@with_setup(setup,)\ndef test_run_1():\n \"\"\"Read from NumPy array file.\"\"\"\n # Run the algorithm.\n with Experiment('myexperiment', dir=DIRPATH, mode='a') as exp:\n run(raw_data, experiment=exp, prm=prm, probe=Probe(prb),)\n\n # Open the data files.\n with Experiment('myexperiment', dir=DIRPATH) as exp:\n nspikes = len(exp.channel_groups[0].spikes)\n assert exp.channel_groups[0].spikes.clusters.main.shape[0] == nspikes\n assert exp.channel_groups[0].spikes.features_masks.shape[0] == nspikes\n assert exp.channel_groups[0].spikes.waveforms_filtered.shape[0] == nspikes\n\n assert isinstance(exp.channel_groups[0]._node.pca_waveforms,\n tb.Array)\n\n # Assert the log file exists.\n logfile = exp.gen_filename('log')\n assert os.path.exists(logfile)\n\n assert exp.recordings[0].raw.shape == (nsamples, nchannels)\n assert exp.recordings[0].high.shape == (nsamples, nchannels)\n assert exp.recordings[0].low.shape[0] in range(nsamples // 16 - 2,\n nsamples // 16 + 3)\n assert exp.recordings[0].low.shape[1] == nchannels\n\n@with_setup(setup,)\ndef test_run_2():\n \"\"\"Read from .dat file.\"\"\"\n path = os.path.join(DIRPATH, 'mydatfile.dat')\n (raw_data * 1e4).astype(np.int16).tofile(path)\n\n # Run the algorithm.\n with Experiment('myexperiment', dir=DIRPATH, mode='a') as exp:\n run(path, experiment=exp, prm=prm, probe=Probe(prb))\n\n # Open the data files.\n with Experiment('myexperiment', dir=DIRPATH) as exp:\n assert len(exp.channel_groups[0].spikes)\n\n@with_setup(setup,)\ndef test_run_canonical_pcs():\n prm_canonical = prm.copy()\n\n canonical_pcs = np.ones((prm['nfeatures_per_channel'],\n prm['waveforms_nsamples'],\n prm['nchannels']))\n prm_canonical['canonical_pcs'] = canonical_pcs\n\n with Experiment('myexperiment', dir=DIRPATH, mode='a') as exp:\n run(raw_data, experiment=exp, prm=prm_canonical, probe=Probe(prb),)\n\n@with_setup(setup,)\ndef test_diagnostics():\n\n dir = tempfile.mkdtemp()\n\n path = op.join(dir, 'diagnostics.py')\n with open(path, 'w') as f:\n f.write(\n 'def diagnostics(prm=None, **kwargs):\\n'\n ' print(prm)\\n'\n '\\n')\n\n prm['diagnostics_path'] = path\n\n with Experiment('myexperiment', dir=DIRPATH, mode='a') as exp:\n run(np.zeros((nsamples, nchannels)),\n experiment=exp, prm=prm, probe=Probe(prb))\n\n shutil.rmtree(dir)\n"
},
{
"alpha_fraction": 0.6173320412635803,
"alphanum_fraction": 0.6601752638816833,
"avg_line_length": 25.810810089111328,
"blob_id": "f102500eb3e8baf363182fa3b8dd5451bef8c6c7",
"content_id": "379b5feb046344a0a4e8accf8a737eeb149138fb",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 1027,
"license_type": "no_license",
"max_line_length": 91,
"num_lines": 37,
"path": "/Spikes/CWT_spk_detect.py",
"repo_name": "sapphire008/Python",
"src_encoding": "UTF-8",
"text": "# -*- coding: utf-8 -*-\r\n\"\"\"\r\nCWT unsupervised spike detector\r\nCreated on Mon Dec 21 23:47:39 2015\r\n@author: Edward\r\n\"\"\"\r\n\r\nimport sys\r\nsys.path.append(\"D:/Edward/Documents/Assignments/Scripts/Python/Plots\")\r\nfrom ImportData import NeuroData\r\nsys.path.append(\"D:/Edward/Documents/Assignments/Scripts/Python/Spikes\")\r\nfrom spk_util import *\r\nsys.path.append(\"D:/Edward/Documents/Assignments/Scripts/Python/generic\")\r\nfrom MATLAB import *\r\nfrom matplotlib import pyplot as plt\r\n\r\nfrom wavelet import *\r\n\r\n\r\ndef bior_wavelet():\r\n \"\"\"\r\n \"\"\"\r\n return\r\n\r\ndef spk_detect_cwt(Vs,ts):\r\n \"\"\"Unsupervised spike detection using continuous wavelet transform\"\"\"\r\n \r\n \r\n \r\nif __name__ == '__main__':\r\n datadir = 'D:/Data/Traces/2015/11.November/Data 20 Nov 2015/Slice C.20Nov15.S1.E10.dat'\r\n # Load data\r\n zData = NeuroData(datadir, old=True)\r\n ts = zData.Protocol.msPerPoint\r\n Vs = zData.Current['A']\r\n #Vs = spk_filter(Vs, ts, Wn=[300., 3000.], btype='bandpass')\r\n Vs = spk_window(Vs, ts, [0,5000])"
},
{
"alpha_fraction": 0.5165181756019592,
"alphanum_fraction": 0.5372359156608582,
"avg_line_length": 42.34537124633789,
"blob_id": "53a56ef37447aee9031b17cb2fed2412014d580d",
"content_id": "71b74173de9050a6878f7e6e0ce85529f7ec8ea8",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 19646,
"license_type": "no_license",
"max_line_length": 213,
"num_lines": 443,
"path": "/Plots/simple/barplot.py",
"repo_name": "sapphire008/Python",
"src_encoding": "UTF-8",
"text": "# -*- coding: utf-8 -*-\r\n\"\"\"\r\nCreated on Tue Jul 07 16:56:32 2015\r\nSimple Bar plot function\r\n@author: Edward\r\n\"\"\"\r\nimport numpy as np\r\nimport matplotlib\r\nimport matplotlib.pyplot as plt\r\nimport os\r\nimport copy\r\n# from plots import *\r\n\r\nfrom matplotlib.offsetbox import AnchoredOffsetbox, TextArea, HPacker, VPacker, AuxTransformBox\r\n\r\n\r\nfrom pdb import set_trace\r\n\r\n\r\n# global variables\r\nfontname = 'D:/Edward/Documents/Assignments/Scripts/Python/Plots/resource/Helvetica.ttf' # font .ttf file path\r\n# platform specific fonts\r\n#import sys\r\n#fontname = {'darwin': 'Helvetica', # Mac\r\n# 'win32':'Arial', # Windows\r\n# 'linux': 'FreeSans', # Linux\r\n# 'cygwin': 'Arial' # use Windows\r\n# }.get(sys.platform)\r\n# fontname = 'Helvetica'\r\nfontsize = {'title':10, 'xlab':8, 'ylab':8, 'xtick':6,'ytick':6,'texts':6,\r\n 'legend': 6, 'legendtitle':6} # font size\r\n\r\n# unit in points. This corresponds to 0.25mm (1pt = 1/72 inch)\r\nbar_line_property = {'border': 0.70866144, 'h_err_bar': 0.70866144, 'v_err_bar': 0.70866144,\r\n 'xaxis_tick': 0.70866144, 'yaxis_tick': 0.70866144, 'xaxis_spine': 0.70866144,\r\n 'yaxis_spine': 0.70866144} # in mm.\r\n\r\ndef barplot(groups, values, errors=None, Ns=None, pos=None,\r\n width=0.5, space=0.1, size=(2,2),\r\n color=['#BFBFBF'], xlabpos='hug', ylab=\"\", set_axis=True,\r\n showvalue=True, showerror=False, bardir='+', border=[0.75, 0.5],\r\n capsize=4, ax=None, iteration=0, numdigit=\"{:.1f}\", xpad=5,\r\n enforce_ylim=False, Ns_color='k', values_color='k',ylim=None,\r\n outsidevalue_thresh_px=20, xticklabdir='horizontal',\r\n DEBUG=False, **kwargs):\r\n \"\"\"Takes 3 inputs and generate a simple bar plot\r\n e.g. groups = ['dog','cat','hippo']\r\n values = [-15, 10, 3]\r\n errors = [3, 2, 1]\r\n Ns = [5, 6, 5]. To be labeled at the base, inside the bar.\r\n pos: position of the bar groups. By defualt, np.arange(ngroups).\r\n Or specify as a list that is the same length as ngroups\r\n width: bar width\r\n space: space between bars within the group\r\n size: figure size, in inches. Input as a tuple. Default (3,3). Better\r\n\r\n color: default grey #BFBFBF\r\n xlabpos: xlabel position.\r\n 'hug': always label at the base of the bar. Positive bars label\r\n underneath, negative bars label above;\r\n 'away': label outside of the graph area\r\n ylab: ylabel string\r\n showvalue: show value of the bar right under the error bar\r\n bardir: direction of the bar to show.\r\n \"+\" only show outward bars\r\n \"-\" only show inward bars.\r\n Otherwise, show both direction\r\n border: additional border to add to the left and right of the bars\r\n [0.75, 0.5]\r\n capsize: errorbar capsize (Default 4 point font)\r\n numdigit: format of the value if showvalue. Default {:.2f}\r\n iteration: number of times / groups to draw the bars.\r\n xpad: padding of xtick labels. Deafult 5\r\n enforce_ylim: enforce y-limit, given by the argument ylim.\r\n Choose [True, False, 0]. Effect of this option depends on\r\n multiple factors. Play to determine the best option.\r\n Ns_color: Ns text color. Default 'k'\r\n values_color: values text color. Deafult 'k'\r\n ylim: specify y-axis limit.\r\n outsidevalue_thresh_px: If bar size is shorter than this number of\r\n pixels, write the value outside the bar. Default 20 pixels.\r\n xticklabdir: set to 'vertical' to rotate the xticklabels\r\n\r\n Use add_comparison to add comparison\r\n \"\"\"\r\n values = np.asarray(values)\r\n errors = np.asarray(errors)\r\n # Get bar plot function according to style\r\n ngroups = len(groups) # group labels\r\n # leftmost position of bars\r\n if pos is None:\r\n pos = np.arange(ngroups)\r\n elif len(pos) != ngroups:\r\n raise(ValueError('Length of argument \"pos\" must be the same as the number of groups of bars'))\r\n\r\n # Adjust spaciing\r\n pos = np.asarray(pos)+0.1+iteration*(width+space)\r\n # initialize the plot\r\n if ax is None:\r\n fig, ax = plt.subplots(nrows=1, ncols = 1, sharex=True, figsize=size)\r\n if ylim is not None:\r\n ax.set_ylim(ylim)\r\n else:\r\n fig = ax.get_figure()\r\n fig.set_size_inches(size)\r\n # errorbar property\r\n errors = np.array(errors)\r\n if errors.ndim==1:\r\n err = np.zeros((2, errors.shape[0]))\r\n for m, v in enumerate(values):\r\n if np.isnan(v):\r\n err[:,m] = 0\r\n continue\r\n if bardir == '+':\r\n d = 1 if v>=0 else 0\r\n elif bardir == '-':\r\n d = 0 if v>=0 else 1\r\n\r\n err[d,m] = errors[m]\r\n else:\r\n err = errors\r\n\r\n # plot the series\r\n rec = ax.bar(pos, values, width, yerr=err, color=color, align='center', capsize=capsize, ecolor='k', edgecolor='k', linewidth=bar_line_property['border'],capstyle='projecting', joinstyle='miter',\r\n error_kw={'elinewidth':bar_line_property['v_err_bar'], 'capthick':bar_line_property['h_err_bar'], 'solid_capstyle':'projecting', 'solid_join_style':'miter'},**kwargs)\r\n rec.pos = pos\r\n # rec.err_height = values+np.sign(values)*(err if np.ndim(err)<2 else np.max(err,axis=0)) # height with errorbar, + or -\r\n\r\n if bardir == \"+\" and (enforce_ylim==True or enforce_ylim is 0):\r\n if all(np.array(values)>0) and max(ax.get_ylim())>=0: # all positive values\r\n ax.set_ylim([0,max(ax.get_ylim())])\r\n elif all(np.array(values)<0) and max(ax.get_ylim())<=0: # all negative values\r\n ax.set_ylim([min(ax.get_ylim()), 0])\r\n else: # has a mix of positive and negative values\r\n pass\r\n elif isinstance(enforce_ylim, (tuple, list)) and len(enforce_ylim)==2:\r\n ax.set_ylim(enforce_ylim) # specified ylim\r\n\r\n if DEBUG:\r\n print(bardir)\r\n\r\n # set axis\r\n if set_axis:\r\n ax.spines['left'].set_linewidth(bar_line_property['xaxis_spine'])\r\n ax.spines['right'].set_linewidth(bar_line_property['xaxis_spine'])\r\n ax.spines['bottom'].set_linewidth(bar_line_property['yaxis_spine'])\r\n ax.spines['top'].set_linewidth(bar_line_property['yaxis_spine'])\r\n ax.xaxis.set_tick_params(width=bar_line_property['xaxis_tick'])\r\n ax.yaxis.set_tick_params(width=bar_line_property['yaxis_tick'])\r\n ax.tick_params(axis='both',direction='out')\r\n ax.spines['left'].set_visible(True)\r\n ax.spines['right'].set_visible(False)\r\n ax.spines['top'].set_visible(False)\r\n ax.spines['bottom'].set_visible(True)\r\n ax.xaxis.set_ticks_position('bottom')\r\n ax.yaxis.set_ticks_position('left')\r\n if ylim is None:\r\n ymin, ymax = ax.get_ybound()\r\n else:\r\n ymin, ymax = ylim\r\n\r\n\r\n if ymax <= 0.0: # only negative data present\r\n if DEBUG:\r\n print('All negative data')\r\n # flip label to top\r\n if ylim is not None:\r\n ax.set_ylim(ylim)\r\n elif enforce_ylim is 0:\r\n ax.spines['bottom'].set_position('zero') # zero the x axis\r\n else:\r\n ax.spines['bottom'].set_position(('data',ymax))\r\n ax.tick_params(labelbottom=False, labeltop=True)\r\n elif ymin >= 0.0: # only positive data present. Default\r\n if DEBUG:\r\n print('All positive data')\r\n if ylim is not None:\r\n ax.set_ylim(ylim)\r\n elif enforce_ylim is 0:\r\n ax.spines['bottom'].set_position('zero') # zero the x axis\r\n else:\r\n ax.spines['bottom'].set_position(('data',ymin))\r\n else: # mix of positive an negative data : set all label to bottoms\r\n if DEBUG:\r\n print('Mix of positive and negative data')\r\n ax.spines['bottom'].set_visible(False)\r\n ax.spines['top'].set_visible(True)\r\n ax.spines['top'].set_position('zero')\r\n if ylim is not None:\r\n ax.set_ylim(ylim)\r\n elif enforce_ylim==True or enforce_ylim is 0: #really strong enforcement\r\n if np.all(np.array(values)>=0):\r\n ax.set_ylim([0, ymax])\r\n else:\r\n ax.set_ylim([ymin, 0])\r\n\r\n # Set x categorical label\r\n ax.xaxis.set_ticks_position('none')\r\n ax.set_xticks(pos)\r\n ax.set_xticklabels(groups, va='center', ha='center', rotation=xticklabdir)\r\n ax.tick_params(axis='x', which='major', pad=xpad)\r\n\r\n yrange = ax.get_ylim()[1]-ax.get_ylim()[0] # axis height in data\r\n yori = (0 - ax.get_ylim()[0])/yrange\r\n def hugxticks(values, ax, yrange, yroi):\r\n if all(np.array(values)>0) or all(np.array(values)<0):\r\n return\r\n\r\n original_y = [a.get_position()[1] for a in ax.get_xticklabels()]\r\n [a.set_y(yori) for a in ax.get_xticklabels()] # move below origin first\r\n plt.draw()\r\n for v, a in zip(values, ax.get_xticklabels()):\r\n txtbb = a.get_window_extent()\r\n _, ymin, _, ymax = tuple(ax.transData.inverted().transform(txtbb).ravel())\r\n ypos = abs((ymin+ymax)/2.0)\r\n if v>=0:\r\n a.set_y(yori + ypos/yrange*1/2)\r\n else:\r\n a.set_y(yori + ypos/yrange*3/2)\r\n\r\n return original_y\r\n # Set x label vertical position\r\n if xlabpos == 'hug':\r\n hugxticks(values, ax, yrange, yori)\r\n\r\n # Set Ns\r\n if Ns is not None:\r\n plt.draw()\r\n #if xlabpos != 'hug':\r\n # original_y = hugxticks(values, ax, yrange, yori)\r\n for i, (v, a, n) in enumerate(zip(values, ax.get_xticklabels(), Ns)):\r\n if np.isnan(v):\r\n continue\r\n txtbb = a.get_window_extent()\r\n xmin, ymin, xmax, ymax = tuple(ax.transData.inverted().transform(txtbb).ravel())\r\n xtext = (xmin+xmax)/2.0\r\n yoffset = ax.transData.inverted().transform((0, 2))[1] - ax.transData.inverted().transform((0,0))[1]\r\n\r\n if enforce_ylim==True or enforce_ylim is 0 or (not(all(values>0)) and not(all(values<0))):\r\n ybase = 0\r\n else:\r\n ybase = ax.get_ylim()[np.argmin(np.abs(ax.get_ylim()))]\r\n\r\n\r\n if abs(ax.transData.transform((0,v))[1] - ax.transData.transform((0,0))[1]) <outsidevalue_thresh_px: # small bar, less than 10 pixels\r\n ke = max(np.abs(err[:,i]))\r\n n_text_color = Ns_color # its on the outside anyway\r\n if v>0:\r\n ytext = ybase + ke + yoffset\r\n va = 'bottom'\r\n elif v==0:\r\n if np.all(np.array(values)>=0):\r\n ytext = ybase + ke + yoffset\r\n va = 'bottom'\r\n else:\r\n ytext = ybase - ke - yoffset\r\n va = 'bottom'\r\n else: # v<0\r\n ytext = ybase - ke - yoffset\r\n va = 'top'\r\n else:\r\n if v>=0:\r\n ytext = ybase + yoffset\r\n va = 'bottom'\r\n else: # v<0\r\n ytext = ybase - yoffset\r\n va = 'top'\r\n if all(np.array(rec[i].get_facecolor()) == np.array([0.,0.,0.,1.])) and Ns_color in ['k', '#000000', [0,0,0], (0,0,0), [0,0,0,1], (0,0,0,1), np.array([0,0,0]), np.array([0,0,0,1])]:\r\n n_text_color = 'w'\r\n else:\r\n n_text_color = Ns_color\r\n # ytext = -(ymin+ymax)/2.0\r\n ax.text(xtext,ytext, \"(\"+str(int(n))+\")\", ha='center',va=va, color=n_text_color)\r\n\r\n if showvalue:\r\n plt.draw()\r\n for (i, v), a in zip(enumerate(values), ax.get_xticklabels()):\r\n if np.isnan(v):\r\n continue\r\n txtbb = a.get_window_extent()\r\n xmin, ymin, xmax, ymax = tuple(ax.transData.inverted().transform(txtbb).ravel())\r\n xtext = (xmin+xmax)/2.0\r\n yoffset = ax.transData.inverted().transform((0, 2))[1] - ax.transData.inverted().transform((0,0))[1]\r\n bar_size = abs(ax.transData.transform((0,v))[1] - ax.transData.transform((0,0))[1])\r\n\r\n\r\n if bar_size < 2*outsidevalue_thresh_px: # small bar, less than 10 pixels\r\n ke = max(np.abs(err[:,i])) # put value outside of errorbar\r\n v_text_color = values_color # its on the outside anyway\r\n if v>0:\r\n ytext = v + ke + yoffset*3 + 2*(bar_size<outsidevalue_thresh_px and Ns is not None) * yoffset\r\n va = 'bottom'\r\n elif v==0:\r\n if np.all(np.array(values)>=0):\r\n ytext = v + ke + yoffset*5\r\n va = 'bottom'\r\n else:\r\n ytext = v - ke - yoffset*5\r\n va = 'bottom'\r\n else: # v<0\r\n ytext = v - ke - yoffset*3 - 2*(bar_size<outsidevalue_thresh_px and Ns is not None) * yoffset\r\n va = 'top'\r\n else:\r\n if v>=0:\r\n ytext = v - yoffset\r\n va = 'top'\r\n else: # v<0\r\n ytext = v + yoffset\r\n va = 'bottom'\r\n if all(np.array(rec[i].get_facecolor()) == np.array([0.,0.,0.,1.])) and values_color in Ns_color in ['k', '#000000', [0,0,0], (0,0,0), [0,0,0,1], (0,0,0,1), np.array([0,0,0]), np.array([0,0,0,1])]:\r\n v_text_color = 'w'\r\n else:\r\n v_text_color = values_color\r\n if not showerror:\r\n ax.text(xtext, ytext, numdigit.format(v), ha='center', va=va, color=v_text_color)\r\n else:\r\n ax.text(xtext, ytext, (numdigit+\"\\n±\\n\"+numdigit).format(v, float(np.nanmean(err[:,i]))), ha='center', va=va, color=v_text_color)\r\n\r\n ax.set_xticks(pos-iteration*(width+space)/2)\r\n # Set ylabel\r\n ax.set_ylabel(ylab)\r\n # Set xaxis limit\r\n ax.set_xlim([min(pos) - border[0], max(pos) + border[1]])\r\n\r\n # Line drawing\r\n setBarplotErrorbarStyle(rec)\r\n equalAxLineWidth(ax)\r\n setAxisLineStyle(ax)\r\n\r\n # Save the figure\r\n# if savepath is not None:\r\n# fig.savefig(savepath, bbox_inches='tight', rasterized=True, dpi=300)\r\n return(fig, ax, rec)\r\n\r\n\r\ndef AdjustAxs(otypes=[np.ndarray], excluded=None):\r\n \"\"\"Used as a decorator to set the axis properties\"\"\"\r\n def wrap(func):\r\n # vectorize the func so that it can be applied to single axis or\r\n # multiple axes\r\n func_vec = np.vectorize(func, otypes=otypes, excluded=excluded)\r\n def wrapper(ax, *args, **kwargs):\r\n res = func_vec(ax, *args, **kwargs)\r\n return(res)\r\n return(wrapper)\r\n return(wrap)\r\n\r\ndef setBarplotErrorbarStyle(rec):\r\n if 'ErrorbarContainer' in str(type(rec)):\r\n children = rec.get_children()\r\n else:\r\n children = rec.errorbar.get_children()\r\n for c in children:\r\n if c is None:\r\n continue\r\n elif isinstance(c, matplotlib.lines.Line2D):\r\n c.set_dash_capstyle = 'projecting'\r\n c.set_dash_joinstyle = 'miter'\r\n c.set_solid_capstyle = 'projecting'\r\n c.set_solid_joinstyle = 'miter'\r\n elif isinstance(c, matplotlib.collections.LineCollection):\r\n try: # for future\r\n c.set_capstyle = 'projecting'\r\n except:\r\n pass\r\n try:\r\n c.set_joinstyle = 'miter'\r\n except:\r\n pass\r\n\r\n@AdjustAxs()\r\ndef equalAxLineWidth(ax, lineproperty ={'xaxis_tick': 0.70866144,\r\n 'yaxis_tick': 0.70866144,\r\n 'xaxis_spine': 0.70866144,\r\n 'yaxis_spine': 0.70866144}):\r\n ax.spines['left'].set_linewidth(bar_line_property['xaxis_spine'])\r\n ax.spines['right'].set_linewidth(bar_line_property['xaxis_spine'])\r\n ax.spines['bottom'].set_linewidth(bar_line_property['yaxis_spine'])\r\n ax.spines['top'].set_linewidth(bar_line_property['yaxis_spine'])\r\n ax.xaxis.set_tick_params(width=bar_line_property['xaxis_tick'])\r\n ax.yaxis.set_tick_params(width=bar_line_property['yaxis_tick'])\r\n\r\n@AdjustAxs()\r\ndef setAxisLineStyle(ax, lineproperty={'xaxis_tick_capstyle':'projecting',\r\n 'xaxis_tick_joinstyle':'miter',\r\n 'yaxis_tick_capstyle':'projecting',\r\n 'yaxis_tick_joinstyle':'miter',\r\n 'xaxis_spine_capstyle':'projecting',\r\n 'xaxis_spine_joinstyle':'miter',\r\n 'yaxis_spine_capstyle':'projecting',\r\n 'yaxis_spine_joinstyle':'miter',\r\n }):\r\n # Ticks\r\n for i in ax.xaxis.get_ticklines():\r\n i._marker._capstyle = lineproperty['xaxis_tick_capstyle']\r\n i._marker._joinstyle = lineproperty['xaxis_tick_joinstyle']\r\n\r\n for i in ax.yaxis.get_ticklines():\r\n i._marker._capstyle = lineproperty['yaxis_tick_capstyle']\r\n i._marker._joinstyle = lineproperty['yaxis_tick_joinstyle']\r\n\r\n # Spines\r\n ax.spines['left']._capstyle = lineproperty['yaxis_spine_capstyle']\r\n ax.spines['left']._joinstyle = lineproperty['yaxis_spine_joinstyle']\r\n ax.spines['right']._capstyle = lineproperty['yaxis_spine_capstyle']\r\n ax.spines['right']._joinstyle = lineproperty['yaxis_spine_joinstyle']\r\n ax.spines['top']._capstyle = lineproperty['xaxis_spine_capstyle']\r\n ax.spines['top']._joinstyle = lineproperty['xaxis_spine_joinstyle']\r\n ax.spines['bottom']._capstyle = lineproperty['xaxis_spine_capstyle']\r\n ax.spines['bottom']._joinstyle = lineproperty['xaxis_spine_joinstyle']\r\n\r\n\r\ndef add_comparison(x_bar=[0.1, 1.1], y_bar=[0.5, 0.5], x_text=0.6, y_text=0.5, text='*', ax=None, va='bottom', ha='center', *args, **kwargs):\r\n if ax is None:\r\n ax = plt.gca()\r\n\r\n ax.plot(x_bar, y_bar, color='k', lw=bar_line_property['h_err_bar'])\r\n ax.text(x_text, y_text, text, va=va, ha=ha,*args, **kwargs)\r\n\r\n return ax\r\n\r\n\r\n\r\nif __name__=='__main__':\r\n savepath='D:/Edward/Documents/Assignments/Scripts/Python/Plots/example/barplot2.eps'\r\n # fig, ax = plt.subplots(1,1)\r\n groups = ['dog','cat','hippo']\r\n values = [-15, 5, 9]\r\n errors = [3, 2, 1]\r\n Ns = [5, 13, 4]\r\n # ax.bar(np.arange(3), values, width=0.2, color='b', align='center')\r\n fig, ax, rec1 = barplot(groups, values, errors, Ns, width=0.3, space=0.05, numdigit=\"{:d}\", ylab='weight gained (kg)', iteration=0)\r\n\r\n groups = ['dog', 'cat', 'hippo']\r\n values = [-13, 6, 8]\r\n errors = [2, 3, 1]\r\n Ns = [8, 10, 7]\r\n # ax.bar(np.arange(3)+0.2, values, width=0.2, color='r', align='center')\r\n # Draw another group bars next to the previous group\r\n fig, ax, rec2 = barplot(groups, values, errors, Ns, width=0.3, space=0.1, numdigit=\"{:d}\", ylab='weight gained (kg)', iteration=1, ax=ax, size=(5,5))\r\n\r\n #SetFont(ax, fig, fontsize=fontsize, fontname='Helvetica')\r\n # fig.savefig(savepath, bbox_inches='tight', rasterized=True, dpi=300)\r\n"
},
{
"alpha_fraction": 0.6532881855964661,
"alphanum_fraction": 0.6692456603050232,
"avg_line_length": 29.363636016845703,
"blob_id": "b38f740e0a65bcfff44a2a5cbf5842b25a3452cb",
"content_id": "5fdcb4e95e7515f8f56fb9e0dd93d002b985a64a",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 2068,
"license_type": "no_license",
"max_line_length": 87,
"num_lines": 66,
"path": "/python_tutorials/PythonForDataAnalysis/Chapter_3_IPython.py",
"repo_name": "sapphire008/Python",
"src_encoding": "UTF-8",
"text": "# -*- coding: utf-8 -*-\r\n\"\"\"\r\nCreated on Sun May 04 00:07:53 2014\r\n\r\n@author: Edward\r\n\"\"\"\r\n\r\n# Poor man's debugger functions\r\ndef set_trace():\r\n # put this function right before the exception occurs to debug. Press \"c\"\r\n # to resume the program;\r\n from IPython.core.debugger import Pdb\r\n Pdb(color_scheme='Linux').set_trace(sys._getframe().f_back)\r\n \r\ndef debug(f,*args, **kwargs):\r\n # allows arbitrarily calling debugger for a function. Press \"c\" to resume\r\n # the function; press \"s\" to step through each line of the function\r\n from IPython.core.debugger import Pdb\r\n pdb = Pdb(color_scheme='Linux')\r\n return pdb.runcall(f,*args, **kwargs)\r\n \r\n# example usage of debug function\r\ndef f(x,y,z=1):\r\n tmp=x+y\r\n tmp= x*tmp*y\r\n return tmp/z\r\n\r\ndebug(f,1,2,z=3)\r\n\r\n\r\n# Checking running time within IPython\r\nstrings = ['foo','foobar','baz','qux','python', 'Guido Van Rossum'] *100000\r\n%time method1 = [x for x in strings if x.startswith('foo')]# get timing only once\r\n%timeit method2 = [x for x in strings if x[:3]=='foo']# get the best time\r\n\r\n\r\n# Profiling programs: %prun and %run -p\r\n# example: \r\nimport numpy as np\r\nfrom numpy.linalg import eigvals\r\ndef run_experiment(niter=100):\r\n K = 100\r\n results = []\r\n for _ in xrange(niter):\r\n mat = np.random.randn(K, K)\r\n max_eigenvalue = np.abs(eigvals(mat).max())\r\n results.append(max_eigenvalue)\r\n return results\r\n \r\nsome_results = run_experiment()\r\nprint 'Largest one we saw %s' % np.max(some_results)\r\n \r\n# run the above example with profiling: suppose the above scrips is in cprof_example.py\r\npython -m cProfile -s cprof_example.py\r\n\r\n# prun will profile a python statement rather than a .py file\r\n%prun -l 7 -s cumulative run_experiment()\r\n\r\n# reloading module dependencies: unlike MATLAB, Python does not reload changes\r\n# automatically. To refresh the modified dependencies, use\r\nimport some_lib\r\nreload(some_lib)\r\n# IPython have deep/recursive reload if there are deeper dependency changes\r\ndreload(some_lib)\r\n \r\n# This concludes today's study"
},
{
"alpha_fraction": 0.6879432797431946,
"alphanum_fraction": 0.6879432797431946,
"avg_line_length": 26.200000762939453,
"blob_id": "dcdf545a03a0d99d7dae1937c2dd1f7649039c79",
"content_id": "1453b0a577fb3e06f360e046894fa6d7dbe36a0d",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 141,
"license_type": "no_license",
"max_line_length": 68,
"num_lines": 5,
"path": "/python_tutorials/ThinkPython/python_test_01.py",
"repo_name": "sapphire008/Python",
"src_encoding": "UTF-8",
"text": "import sys\r\nsys.path.append(\"C:\\\\Users\\\\Edward\\Documents\\\\Assignments\\\\Python\");\r\n\r\n#import subprocess;\r\n#subprocess.check_call([\"cd ..\"]);\r\n"
},
{
"alpha_fraction": 0.6317057609558105,
"alphanum_fraction": 0.6662824749946594,
"avg_line_length": 28.27914047241211,
"blob_id": "b0b0e648fe6cfbf369f9b69ec9488ae774bce691",
"content_id": "ae6ac8f3daeaa235c292bf0ebdd1164e22ec539d",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 9544,
"license_type": "no_license",
"max_line_length": 80,
"num_lines": 326,
"path": "/python_tutorials/practice_notes_7.py",
"repo_name": "sapphire008/Python",
"src_encoding": "UTF-8",
"text": "# Python 3.3.0 Practice Notes\n# Day 7: January 4, 2013\n\n##############################################################################\n# Class and Object: user defined types\nclass Point(object): #notice that \"class\" and \"object\" are keywords\n \"\"\"Represents a point in 2D space.\"\"\" #annotation\n pass;\n \nprint(Point);\n#>>><class '__main__.Point'>\n\n#an instance of a class\nblank=Point();\nprint(blank);\n#>>><__main__.Point object at 0x0000000003130860>\n\n#Assigning attritubes to the class:\n#This is very similar to structures in MATLAB\n#the following assign x and y attribute (in MATLAB, fields) to instance blank\nblank.x=3.0;\nblank.y=4.0;\nprint(\"X-coordinate:\",blank.x);\nprint(\"Y-coordinate:\",blank.y);\n#>>>X-coordinate: 3.0\n#>>>Y-coordinate: 4.0\n#we may also do this:\nprint('(%g,%g)' %(blank.x,blank.y));\n#>>>(3,4)\n#it is also possible to call functions and methods with attributes\nimport math;\ndistance=math.sqrt(blank.x**2+blank.y**2); #note ** replaces ^ in Python 3\nprint(distance);\n#>>>5.0;\n\ndef distance_between_points(p1,p2):\n \"\"\"take in two Points objects and calculate their distance\"\"\"\n import math;\n distance=math.sqrt((p1.x-p2.x)**2+(p1.y-p2.y)**2);\n return distance;\n\npoint_1=Point();\npoint_1.x=2.3;\npoint_1.y=3.6;\npoint_2=Point();\npoint_2.x=10.2;\npoint_2.y=15.3;\n\nDIST=distance_between_points(point_1,point_2);\nprint(DIST);\n#>>>14.117365193264641\n\ndef print_point(p):\n print(\"(%g,%g)\" %(p.x,p.y));\n\n\n# Rectangle, with Points embedded\nclass Rectangle(object):\n \"\"\"Represents a rectangle.\n attributes: width, height, corner.\n \"\"\"\n pass;\n\nbox=Rectangle();\nbox.width=100.0;\nbox.height=200.0;\nbox.corner=Point();#Point object is embedded within Rectangle instance\nbox.corner.x=0.0;\nbox.corner.y=0.0;\n\n#instance can be a return value\ndef find_center(rect):\n p=Point();\n p.x=rect.corner.x+rect.width/2.0;\n p.y=rect.corner.y+rect.height/2.0;\n return p;\n\ncenter=find_center(box);\nprint_point(center);\n#>>>(50,100)\n\n#Customized objects are mutable\nprint(box.width);\n#>>>100.0\nbox.width=box.wdith+10;\nprint(box.width);\n#>>>110.0\n\n#Since they are mutable, there may be potentially problems wth aliasing\n#however, there si a module \"copy\" we can use to duplicate the object\nimport copy;\n\nbox2=copy.copy(box); #shallow copy, which does not copy the embedded elements\n\nbox is box2;\n#>>>False\nbox==box2;\n#>>>False #because in object, \"==\" operator is the same as \"is\" operator\n\n#Also, the shallow copy does not copy the embeded objects\nbox.corner is box2.corner;\n#>>>True\n\n#to do a deep copy, use copy.deepcopy\nbox3=copy.deepcopy(box);\nbox.corner is box3.corner;\n#>>>False\n\n#if uncertain what attributes that an object have, use hasattr(object,'attr');\nhasattr(box,'x');\n#>>>False\nhasattr(box,'corner');\n#>>>True\n\n##############################################################################\n# Class and Function\nclass Time(object):\n \"\"\"Represents the time of the day.\n attributes: hour, minute, second\n \"\"\"\n pass;\n\ntime=Time();\ntime.hour=11;\ntime.minute=59;\ntime.second=30;\n\ndef print_time(t):\n print('%.2d:%.2d:%.2d' %(t.hour,t.minute,t.second));\n#note that %.2d prints 2 digits\n\n#Pure functions and Modifiers:\ndef add_time(t1,t2): #pure function\n \"\"\"Adding two time\"\"\"\n SUM = Time();\n SUM.hour=t1.hour+t2.hour;\n SUM.minute=t1.minute+t2.minute;\n SUM.second=t1.second+t2.second;\n return SUM;\n\n#pure function does not modify any of the objects passed onto its arguments\n#in this case, t1 and t2 are not changed at all\n\n#Test the function\nstart=Time();#specifying start time\nstart.hour=9;\nstart.minute=45;\nstart.second=0;\n\nduration=Time();#specifying duration\nduration.hour=1;\nduration.minute=35;\nduration.second=0;\n\nendTime=add_time(start,duration);#calculating end time\nprint_time(endTime);#print end time\n#>>>10:80:00\n\n#however, this is not what we expected for time in real life, therefore, we\n#need modifier functions\ndef increment(time,seconds):#a modifer function changes its input\n time.second+=seconds; #increase the time by specified seconds\n \n if time.second>60:#if second greater than 60\n time.minute+=time.second//60;#increase minute by quotient\n time.second=time.second%60;#find the remainder after dividing 60\n \n if time.minute>=60:\n time.hour+=time.minute//60;\n time.minute=time.minute%60;\n#we may also invoke a recursion in the function, but it may be less efficient\n\nincrement(endTime,0);\nprint_time(endTime);\n#>>>11:20:00\n\n# Prototype vs. Patch: write, test, and retest to correct errors\n#we can either write a pure function that includes all the algorithms,\n#or we can create different parts of that function by creating simpler\n#individual functions which can be called into another function that\n#carries out the goal. This is called planned development, which usually\n#involves high-level insights that breaks down the problem.\n\n##############################################################################\n# Class and Method:\n#Difference between method and function:\n#1). Methods are defined inside a class in order to make the relationship\n#between class and the method clear\n#2). They syntax for invoking a method is different from the syntax for calling\n#a function\n\n#To create a method inside a class is like create a function, except it is\n#under the class object, rather than the __main__\nclass Time(object):\n def print_time(time):#this first parameter of the method is usually called\n #self, so we may use \"self\" instead of \"time\"\n print('%.2d:%.2d:%.2d' %(time.hour,time.minute,time.second));\n\n#Testing\nStartTime=Time();\nStartTime.hour=2;\nStartTime.minute=34;\nStartTime.second=31;\nTime.print_time(StartTime);#now print_time is a method of Time\n#>>>02:34:31\n#we can also use method syntax to get the same result\nStartTime.print_time();\n#>>>02:34:31\n#in this case, \"StartTime\" is the subject with method \"print_time\"\n\n#We now creates several methods for class Time. Note that it is important\n#to leave NO empty line between each method, at least in Komodo.\ndef int_to_time(seconds):\n \"\"\"convert seconds in integer to a time object\"\"\"\n time=Time();\n minutes,time.second=divmod(seconds,60);\n time.hour,time.minute=divmod(minutes,60);\n return time;\n#the reason not to put this function inside Time as a method: the input is\n#an integer, not a Time object.\n\nclass Time(object):\n def print_time(self):\n \"\"\"print time object\"\"\"\n print('%.2d:%.2d:%.2d' %(self.hour,self.minute,self.second));\n def time_to_int(self):\n \"\"\"convert a time object to integer\"\"\"\n minutes=self.hour*60+self.minute;\n seconds=minutes*60+self.second;\n return seconds;\n def increment(self,seconds):\n \"\"\"increase a time object by a specified seconds\"\"\"\n seconds+=self.time_to_int();\n return int_to_time(seconds);\n def is_after(self,other):\n \"\"\"check if a time is after another time\"\"\"\n return self.time_to_int()>other.time_to_int();\n def __init__(self,hour=0,minute=0,second=0):\n \"\"\"__init__ method initilize the object with default values\"\"\"\n self.hour=hour;\n self.minute=minute;\n self.second=second;\n def __str__(self):\n \"\"\"convert the object to a string. This allows the object to be\n printed directly using 'print'. \"\"\"\n return '%.2d:%.2d:%.2d' %(self.hour,self.minute,self.second);\n def add_time(self,other):\n \"\"\"allows the addition of two times given\"\"\"\n seconds=self.time_to_int()+other.time_to_int();\n return int_to_time(seconds);\n def __add__(self,other):#this __add__ method checks type of \"other\"\n \"\"\"adds time together\"\"\"\n if isinstance(other,Time):\n return self.add_time(other);\n elif isinstance(other,int):\n return self.increment(other);\n def __radd__(self,other):\n \"\"\"gives communitative property of addition to the class object\"\"\"\n return self.__add__(other);\n\n#testing\nstart=Time();\nstart.hour=1;\nstart.minute=32;\nstart.second=41;\nend=Time();\nend.hour=2;\nend.minute=34;\nend.second=24;\nend.is_after(start);#chekc to see if end time is after start time\n#>>>True\n\n#testing __init__ method\ntime=Time();\ntime.print_time();\n#>>>00:00:00\ntime=Time(9);\ntime.print_time();\n#>>>09:00:00\ntime=Time(9,30);\ntime.print_time();\n#>>>09:30:00\ntime=Time(9,30,42);\ntime.print_time();\n#>>>09:30:42\n\n#testing __str__ method\ntime=Time(9,45);\nprint(time);#\"print\" invokes \"__str__\" method\n#>>>09:45:00\n\n#testing __add__ method\nstart=Time(9,45);\nduration=Time(1,35);\nprint(start+duration); #the \"+\" should invoke \"__add__\" method\n#>>>11:20:00\nduration=30;#30 seconds of duration\nprint(start+duration);\n#>>>09:45:30\n#however, the addition is not communitative\nprint(duration+start);\n#>>>TypeError: unsupported operand type(s) for +: 'int' and 'Time'\n#this can be solved using __radd__ or \"right_side add\"\n#it is invoked when the Time object is appears on the right side of the\n#\"+\" operator\n#after adding __radd__ method, try add again\nstart=Time(9,45);\nduration=30;\nprint(duration+start);\n#>>>09:45:30;\n\n# Polymorphism: functions that can work with several types\n#for example, sum() is polymorphic and adds up the objects as long as the object\n#itself supports addition\nt1=Time(7,43);\nt2=Time(7,41);\nt3=Time(7,37);\ntotal=sum([t1,t2,t3]);\nprint(total);\n#>>>23:01:00\n\n#Use __dict__ method to print out a dictionary of attributes and values\nprint(t1.__dict__);\n#>>>{'hour': 7, 'minute': 43, 'second': 0}\n\n#This concludes today's study."
},
{
"alpha_fraction": 0.5243932604789734,
"alphanum_fraction": 0.5438770055770874,
"avg_line_length": 42.60881423950195,
"blob_id": "ce5456904a5ff0cc2f5ffffc35d70d70a3f861f5",
"content_id": "2222d30479f47fc79147272773cd71c8ce85b87a",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 32386,
"license_type": "no_license",
"max_line_length": 159,
"num_lines": 726,
"path": "/Spikes/ImportData.py",
"repo_name": "sapphire008/Python",
"src_encoding": "UTF-8",
"text": "# -*- coding: utf-8 -*-\r\n\"\"\"\r\nCreated on Sat Jun 27 19:24:54 2015\r\n\r\n@author: Edward\r\n\"\"\"\r\n\r\nimport numpy as np\r\nimport pandas as pd\r\nimport os\r\nimport zipfile\r\nimport six\r\nimport re\r\nfrom pdb import set_trace\r\nfrom collections import OrderedDict\r\n# import matplotlib.pyplot as plt\r\n\r\ndef readVBString(fid):\r\n stringLength = int(np.fromfile(fid, np.int16, 1))\r\n if stringLength==0:\r\n return('')\r\n else:\r\n # if python 2\r\n if six.PY2:\r\n return(''.join(np.fromfile(fid, '|S1', stringLength)))\r\n # if python 3\r\n elif six.PY3:\r\n tmp = np.fromfile(fid, '|S1', stringLength)\r\n return(np.ndarray.tostring(tmp).decode('UTF-8'))\r\n\r\nclass Protocol(object): # for composition\r\n pass\r\n\r\nclass NeuroData(object):\r\n \"\"\"Read electrophysiology data file\r\n \"\"\"\r\n def __init__(self, dataFile=None, old=False, *args, **kwargs):\r\n \"\"\"Initialize class\"\"\"\r\n self.Voltage = {}\r\n self.Current = {}\r\n self.Stimulus = {}\r\n self.Protocol = Protocol() # composition\r\n if dataFile is not None and isinstance(dataFile, str):\r\n # load directly if all the conditions are met\r\n self.LoadData(dataFile=dataFile, old=old, *args, **kwargs)\r\n else:\r\n IOError('Unrecognized data file input')\r\n\r\n def LoadData(self, dataFile, old=True, *args, **kwargs): #old=True to be edited later\r\n \"\"\"Load data in text file\"\"\"\r\n dataFile = dataFile.replace('\\\\','/')# make sure using forward slash\r\n # check file exists\r\n if not os.path.isfile(dataFile):\r\n IOError('%s does not exist' %dataFile)\r\n # Evoke proper load method\r\n if old:\r\n self.LoadOldDataFile(dataFile, *args, **kwargs)\r\n else:\r\n self.LoadDataFile(dataFile, *args, **kwargs)\r\n\r\n def LoadDataFile(self, dataFile, infoOnly=False, getTime=False):\r\n \"\"\"Read zipped data file (new format)\"\"\"\r\n archive = zipfile.ZipFile(dataFile, 'r')\r\n # Check if the file is a valid zipfile\r\n if not archive.is_zipfile():\r\n IOError('%s is not a valid zip file'%dataFile)\r\n # read header txt file\r\n fid = archive.read('header.txt','r')\r\n self.Protocol.infoBytes = np.fromfile(fid, np.int32, 1) # size of header\r\n # ... etc\r\n\r\n def LoadOldDataFile(self, dataFile, numChannels=4, infoOnly=False, getTime=False):\r\n \"\"\"Read Old .dat format data file\"\"\"\r\n self.Protocol.numChannels = numChannels # hard set\r\n self.Protocol.readDataFrom = os.path.abspath(dataFile).replace('\\\\','/') # store read location\r\n with open(dataFile, 'rb') as fid:\r\n fid.seek(6, 0) # set to position 6 from the beginning of the file\r\n self.Protocol.infoBytes = np.fromfile(fid, np.int32, 1) # size of header\r\n self.Protocol.sweepWindow = np.fromfile(fid, np.float32, 1)[0] #in msec per episode\r\n self.Protocol.msPerPoint = np.fromfile(fid, np.float32, 1)[0] / 1000.0 # in microseconds per channel, divided by 1000 to msec\r\n self.Protocol.numPoints = np.fromfile(fid, np.int32, 1)[0] # number of data points\r\n self.Protocol.WCtime = np.fromfile(fid, np.float32, 1)[0] # in seconds since went whole cell\r\n self.Protocol.WCtimeStr = self.epiTime(self.Protocol.WCtime)\r\n self.Protocol.drugTime = np.fromfile(fid, np.float32,1)[0] # in seconds since most recent drug started\r\n self.Protocol.drugTimeStr = self.epiTime(self.Protocol.drugTime)\r\n self.Protocol.drug = int(np.fromfile(fid,np.float32,1)[0]) #an integer indicating what drug is on\r\n\r\n #% new from BWS on 12/21/08\r\n np.fromfile(fid,np.int32,1) # simulated data\r\n fid.seek(48 , 0)\r\n self.Protocol.genData = np.fromfile(fid, np.float32, 56) # [need expansion]\r\n\r\n # read in TTL information\r\n self.Protocol.ttlData = []\r\n ttlDataStr = \"\"\r\n chanCounter = 0\r\n for index in range(self.Protocol.numChannels):\r\n fid.seek(10, 1) # 10 is for VB user-defined type stuff\r\n self.Protocol.ttlData.append(np.fromfile(fid, np.float32, 17)) #[need expansion]\r\n ttlDataStr += self.generateTTLdesc(chanCounter, self.Protocol.ttlData[-1])\r\n chanCounter += 1\r\n #print(fid.tell())\r\n \r\n self.Protocol.ttlDict = []\r\n for index, ttlData in enumerate(self.Protocol.ttlData):\r\n self.Protocol.ttlDict.append(self.parseTTLArray_old(ttlData))\r\n \r\n # read in DAC information\r\n self.Protocol.dacData = []\r\n self.Protocol.dacName = []\r\n dacDataStr = \"\"\r\n chanCounter = 0\r\n for index in range(self.Protocol.numChannels):\r\n fid.seek(10, 1) # 10 is for VB user-defined type stuff\r\n self.Protocol.dacData.append(np.fromfile(fid, np.float32, 42)) #[need exspansion]\r\n self.Protocol.dacName.append(readVBString(fid))\r\n dacDataStr += self.generateDACdesc(chanCounter, self.Protocol.dacData[-1])\r\n chanCounter += 1\r\n\r\n #print(fid.tell())\r\n # Get other parameters\r\n self.Protocol.classVersionNum = np.fromfile(fid, np.float32, 1)[0]\r\n self.Protocol.acquireComment=readVBString(fid)\r\n self.Protocol.acquireAnalysisComment=readVBString(fid)\r\n self.Protocol.drugName=readVBString(fid)\r\n self.Protocol.exptDesc=readVBString(fid)\r\n self.Protocol.computerName=readVBString(fid)\r\n self.Protocol.savedFileName=os.path.abspath(readVBString(fid)).replace('\\\\','/')\r\n self.Protocol.fileName = self.Protocol.savedFileName\r\n self.Protocol.linkedFileName=os.path.abspath(readVBString(fid)).replace('\\\\','/')\r\n self.Protocol.acquisitionDeviceName=readVBString(fid)\r\n self.Protocol.traceKeys=readVBString(fid)\r\n self.Protocol.traceInitValuesStr=readVBString(fid)\r\n self.Protocol.extraScalarKeys=readVBString(fid)\r\n self.Protocol.extraVectorKeys=readVBString(fid)\r\n self.Protocol.genString=readVBString(fid)\r\n self.Protocol.TTLstring = []\r\n for index in range(self.Protocol.numChannels):\r\n self.Protocol.TTLstring.append(readVBString(fid))\r\n self.Protocol.ampDesc = []\r\n for index in range(self.Protocol.numChannels):\r\n self.Protocol.ampDesc.append(readVBString(fid))\r\n\r\n # Stimulus description\r\n self.Protocol.stimDesc = (dacDataStr.strip() + \" \" + ttlDataStr.strip() + \" \" + self.Protocol.acquireComment).strip()\r\n\r\n # Get Channel info\r\n channelDict = {'VoltADC1':'VoltA','VoltADC3':'VoltB',\r\n 'VoltADC5':'VoltC','VoltADC7':'VoltD',\r\n 'CurADC0':'CurA','CurADC2':'CurB',\r\n 'CurADC4':'CurC','CurADC6':'CurD',\r\n 'StimulusAmpA':'StimA',\r\n 'StimulusAmpB':'StimB',\r\n 'StimulusAmpC':'StimC',\r\n 'StimulusAmpD':'StimD',\r\n 'StimulusAmpA9':'StimA',\r\n 'StimulusAmpB9':'StimB',\r\n 'StimulusAmpC9':'StimC',\r\n 'StimulusAmpD9':'StimD'}\r\n keys = [k.split(\"/\")[0] for k in self.Protocol.traceKeys.split()]\r\n self.Protocol.channelNames = [channelDict[k] for k in keys]\r\n self.Protocol.numTraces = len(self.Protocol.channelNames)\r\n\r\n if infoOnly: # stop here if only\r\n return\r\n\r\n # print(fid.tell())\r\n # Read trace data\r\n self.Protocol.traceDesc = []\r\n for chan in self.Protocol.channelNames:\r\n traceFactor = float(np.fromfile(fid, np.float32, 1))\r\n traceLength = int(np.fromfile(fid, np.int32, 1))\r\n traceDesc = readVBString(fid)\r\n self.Protocol.traceDesc.append(traceDesc)\r\n traceData = np.fromfile(fid, np.int16, traceLength)\r\n traceData = traceFactor * traceData\r\n if chan[0] == 'V':\r\n self.Voltage[chan[-1]] = traceData\r\n elif chan[0] == 'C':\r\n self.Current[chan[-1]] = traceData\r\n elif chan[0] == 'S':\r\n self.Stimulus[chan[-1]] = traceData\r\n else: # fallthrough\r\n TypeError('Unrecognized channel type')\r\n\r\n if getTime:\r\n self.Time = np.arange(self.Protocol.numPoints) * self.Protocol.msPerPoint\r\n\r\n # close file\r\n fid.close()\r\n\r\n def TTL2Stim_old(self, TTLarray):\r\n TTL = self.parseTTLArray_old(TTLarray)\r\n TTL_trace = np.arange(0, duration+ts, ts)\r\n return TTL_trace\r\n \r\n \r\n @staticmethod\r\n def epiTime(inTime):\r\n \"\"\"Convert seconds into HH:MM:SS\"\"\"\r\n if inTime>=3600:\r\n hh = int(inTime//3600)\r\n mm = int((inTime - hh*3600)//60)\r\n ss = inTime - hh*3600 - mm*60\r\n return \"{:0d}:{:02d}:{:0.0f}\".format(hh, mm, ss)\r\n elif inTime>=60:\r\n mm = int(inTime // 60)\r\n ss = inTime - mm *60\r\n return \"{:0d}:{:02.0f}\".format(mm, ss)\r\n else:\r\n return(\"{:0.1f} sec\".format(inTime))\r\n\r\n @staticmethod\r\n def generateDACdesc(chanNum, data):\r\n # revised 13 July 2015 BWS\r\n step = \"\"\r\n pulse = \"\"\r\n result = \"\"\r\n if data[0]:\r\n if data[1] and data[2]:\r\n step = \"Step \" + \"{:0.0f}\".format(data[8]) + \" (\" + \"{:0.0f}\".format(data[6]) + \" to \" + \\\r\n \"{:0.0f}\".format(data[7]) + \" ms)\"\r\n if data[14]:\r\n if data[17] != 0:\r\n pulse += \"PulseA \" + \"{:0.0f}\".format(data[17]) + \" [{:0.0f},{:0.0f}]\".format(data[15], data[16]) + \";\"\r\n if data[20] != 0:\r\n pulse += \"PulseB \" + \"{:0.0f}\".format(data[20]) + \" [{:0.0f},{:0.0f}]\".format(data[18], data[19]) + \";\"\r\n if data[23] != 0:\r\n pulse += \"PulseC \" + \"{:0.0f}\".format(data[23]) + \" [{:0.0f},{:0.0f}]\".format(data[21], data[22]) + \";\"\r\n if len(step) > 0 or len(pulse) > 0:\r\n result = \"DAC\" + str(chanNum) + \": \"\r\n if len(step) > 0:\r\n result += step.strip() + \" \"\r\n if len(pulse) > 0:\r\n result += pulse.strip()\r\n return result.strip()\r\n\r\n @staticmethod\r\n def generateTTLdesc(chanNum, data):\r\n # revised 13 July 2015 BWS\r\n SIU = None\r\n Puff = None\r\n if data[0]: # global enable\r\n tempStr = \"\"\r\n if data[5]: # single SIU enable\r\n for k in range(6,10):\r\n if data[k] > 0.:\r\n tempStr += str(data[k]) + \" ms \"\r\n if data[10]: # SIU train enable\r\n tempStr += \" train\"\r\n if len(tempStr) > 0:\r\n SIU = \"SIU \" + tempStr\r\n tempStr = \"\"\r\n if data[2]: # TTL step enable\r\n Puff = \"Puff \" + str(data[4]) + \"ms\"\r\n if SIU or Puff:\r\n retStr = \"TTL\" + str(chanNum) + \": \"\r\n if Puff:\r\n retStr += Puff + \" \"\r\n if SIU:\r\n retStr += SIU\r\n else:\r\n retStr = \"\"\r\n return retStr.strip()\r\n \r\n @staticmethod\r\n def parseTTLArray_old(TTLarray):\r\n \"\"\"Convert the TTL array into meaningful dictionary\"\"\"\r\n TTL = OrderedDict()\r\n TTL['is_on'] = TTLarray[0]\r\n TTL['use_AWF'] = TTLarray[1]\r\n TTL['Step_is_on'] = TTLarray[2]\r\n TTL['Step_Latency'] = TTLarray[3]\r\n TTL['Step_Duration'] = TTLarray[4]\r\n TTL['SIU_Single_Shocks_is_on'] = TTLarray[5]\r\n TTL['SIU_A'] = TTLarray[6]\r\n TTL['SIU_B'] = TTLarray[7]\r\n TTL['SIU_C'] = TTLarray[8]\r\n TTL['SIU_D'] = TTLarray[9]\r\n TTL['SIU_Train_is_on'] = TTLarray[10]\r\n TTL['SIU_Train_of_Bursts_is_on'] = TTLarray[11]\r\n TTL['SIU_Train_Start'] = TTLarray[12]\r\n TTL['SIU_Train_Interval'] = TTLarray[13] # stimulate every x ms\r\n TTL['SIU_Train_Number'] = TTLarray[14]\r\n TTL['SIU_Train_Burst_Interval'] = TTLarray[15]\r\n TTL['SIU_Train_Burst_Number'] = TTLarray[16]\r\n return TTL\r\n\r\n @staticmethod\r\n def parseGenArray_old(GenArray):\r\n \"\"\"Convert genData array into meaningful dictionary\"\"\"\r\n Gen = OrderedDict()\r\n Gen['chantype'] = GenArray[3:11]\r\n Gen['chanGain'] = GenArray[11:19]\r\n Gen['chanExtGain'] = GenArray[19:27]\r\n Gen['AuxTTlEnable'] = GenArray[51]\r\n Gen['extTrig'] = GenArray[52]\r\n Gen['SIUDuration'] = GenArray[53]\r\n Gen['episodicMode'] = GenArray[54]\r\n Gen['programCode'] = GenArray[55]\r\n return Gen\r\n \r\n\r\ndef load_trace(cellname, basedir='D:/Data/Traces', old=True, infoOnly=False, *args, **kwargs):\r\n \"\"\"Wrapper function to load NeuroData, assuming the data structure we have\r\n implemented in get_cellpath\"\"\"\r\n if isinstance(cellname, (list, tuple, np.ndarray)):\r\n cellname = \".\".join(list(cellname))\r\n cell_path = os.path.join(basedir, get_cellpath(cellname))\r\n zData = NeuroData(dataFile=cell_path, old=old, infoOnly=infoOnly, *args, **kwargs)\r\n return zData\r\n\r\n\"\"\" 2photon image data\"\"\"\r\nclass ImageData(object):\r\n \"\"\"Read image data file\r\n \"\"\"\r\n def __init__(self, dataFile=None, old=False, *args, **kwargs):\r\n \"\"\"Initialize class\"\"\"\r\n self.img = None\r\n self.Protocol = Protocol()\r\n\r\n if dataFile is not None and isinstance(dataFile, str):\r\n # load directly if all the conditions are met\r\n self.LoadData(dataFile=dataFile, old=old, *args, **kwargs)\r\n else:\r\n raise(IOError('Unrecognized data file input'))\r\n\r\n def LoadData(self, dataFile, old=True, *args, **kwargs): #old=True to be edited later\r\n \"\"\"Load data in text file\"\"\"\r\n dataFile = dataFile.replace('\\\\','/')# make sure using forward slash\r\n # check file exists\r\n if not os.path.isfile(dataFile):\r\n raise(IOError('%s does not exist' %dataFile))\r\n # Evoke proper load method\r\n if old:\r\n self.LoadOldDataFile(dataFile, *args, **kwargs)\r\n else:\r\n self.LoadDataFile(dataFile, *args, **kwargs)\r\n\r\n def LoadDataFile(self, dataFile, infoOnly=False):\r\n raise(NotImplementedError(\"Cannot load new data format yet\"))\r\n\r\n def LoadOldDataFile(self, dataFile, infoOnly=False):\r\n \"\"\" Read a .img file\"\"\"\r\n fid = open(dataFile, 'rb')\r\n self.Protocol.FileName = dataFile\r\n self.Protocol.BitDepth = 12\r\n self.Protocol.ProgramNumber = np.fromfile(fid, np.int32, 1)\r\n if self.Protocol.ProgramNumber == 2:\r\n fid.close()\r\n self.loadQuantixFile(dataFile)\r\n return\r\n\r\n self.Protocol.ProgramMode = np.fromfile(fid, np.int32, 1)[0]\r\n self.Protocol.DataOffset = np.fromfile(fid, np.int32, 1)[0]\r\n self.Protocol.Width = np.fromfile(fid, np.int32, 1)[0]\r\n self.Protocol.Height = np.fromfile(fid, np.int32, 1)[0]\r\n self.Protocol.NumImages = np.fromfile(fid, np.int32, 1)[0]\r\n self.Protocol.NumChannels = np.fromfile(fid, np.int32, 1)[0]\r\n self.Protocol.Comment = readVBString(fid)\r\n self.Protocol.MiscInfo = readVBString(fid)\r\n self.Protocol.ImageSource = readVBString(fid)\r\n self.Protocol.PixelMicrons = np.fromfile(fid, np.float32, 1)[0]\r\n self.Protocol.MillisecondPerFrame = np.fromfile(fid, np.float32, 1)[0]\r\n self.Protocol.Objective = readVBString(fid)\r\n self.Protocol.AdditionalInformation = readVBString(fid)\r\n self.Protocol.SizeOnSource = readVBString(fid)\r\n self.Protocol.SourceProcessing = readVBString(fid)\r\n\r\n # fix calibration parameters\r\n if not self.Protocol.PixelMicrons or self.Protocol.PixelMicrons == 0:\r\n if self.Protocol.Objective.upper() == 'OLYMPUS 60X/0.9':\r\n self.Protocol.PixelMicrons = 103.8 / float(re.match('Zoom = (\\d+)', self.Protocol.SourceProcessing, re.M|re.I).group(1)) / self.Protocol.Width\r\n elif self.Protocol.Objective.upper() == 'OLYMPUS 40X/0.8':\r\n self.Protocol.PixelMicrons = 163 / float(re.match('Zoom = (\\d+)', self.Protocol.SourceProcessing, re.M|re.I).group(1)) / self.Protocol.Width\r\n\r\n self.Protocol.Origin = []\r\n for n, c in enumerate(['X','Y','Z']):\r\n coord = r\"(?<=\" + c + r\" = )[\\d.-]+\"\r\n coord = re.search(coord, self.Protocol.MiscInfo)\r\n if coord:\r\n self.Protocol.Origin.append(float(coord.group(0)))\r\n else:\r\n self.Protocol.Origin.append(None)\r\n\r\n self.Protocol.delta = [self.Protocol.PixelMicrons, self.Protocol.PixelMicrons, self.Protocol.PixelMicrons]\r\n\r\n # information for convenience\r\n self.Xpixels = self.Protocol.Width\r\n self.Ypixels = self.Protocol.Height\r\n self.numChannels = self.Protocol.NumChannels\r\n self.numFrames = self.Protocol.NumImages\r\n\r\n if not infoOnly:\r\n # read image data\r\n fid.seek(self.Protocol.DataOffset - 1, 0)\r\n self.img = np.zeros((self.Protocol.Height, self.Protocol.Width, self.Protocol.NumImages), dtype=np.int16)\r\n # set_trace()\r\n for x in range(self.Protocol.NumImages):\r\n tmp = np.fromfile(fid, np.int16, self.Protocol.Width * self.Protocol.Height) # / 4096\r\n self.img[:,:,x] = tmp.reshape((self.Protocol.Width, self.Protocol.Height), order='F').T\r\n # Rotate the image\r\n # self.img = self.img[:, ::-1, :]\r\n else:\r\n self.img = -1\r\n\r\n fid.close()\r\n\r\n def loadQuantixFile(self, dataFile, infoOnly=False):\r\n raise(NotImplementedError('Function to load Quantix File not implemented'))\r\n\r\n\r\n\"\"\" Publication figure data (csv file or NeuroData file) \"\"\"\r\nclass FigureData(object):\r\n def __init__(self, dataFile=None, *args, **kwargs):\r\n \"\"\"Initialize class\"\"\"\r\n self.meta = {} # a list of meta parameters in the file\r\n if dataFile is None or not isinstance(dataFile, (str,list,tuple,np.ndarray)):\r\n return\r\n # load the file\r\n self.__loadbyext(dataFile, *args, **kwargs)\r\n\r\n def __loadbyext(self, dataFile, ext=None, *args, **kwargs):\r\n \"\"\"Load data based on extension\"\"\"\r\n if ext is None:\r\n f = dataFile[0] if isinstance(dataFile, (list,tuple,np.ndarray)) \\\r\n else dataFile\r\n ext = os.path.splitext(os.path.basename(os.path.abspath(f)))[-1]\r\n if ext == '.csv': # load csv text file which contains attributes of plots\r\n self.LoadFigureData(dataFile=dataFile, *args, **kwargs)\r\n elif ext == '.dat':\r\n # load NeuroData\r\n self.LoadNeuroData(dataFile, *args, **kwargs)\r\n else:\r\n raise(TypeError('Unrecognized extension %s'%(ext)))\r\n\r\n def LoadFigureData(self, dataFile, sep=',', metachar=\"|\"):\r\n \"\"\"Load data file\"\"\"\r\n if not isinstance(dataFile, str):\r\n raise(TypeError('Please give a single path to .csv data file'))\r\n fid = open(dataFile, 'r')\r\n self.table = []\r\n for rownum, line in enumerate(fid): # iterate through each line\r\n line = line.strip().strip(sep).replace('\\t','').replace('\"','')\r\n if not line or line[0] == \"#\" or line==line[0]*len(line):\r\n continue # skip comments and empty lines\r\n if line[0] == metachar: # metadata starts with \"|\"\r\n self.parse_meta(line, metachar)\r\n else: # assuming the rest of the file is data table\r\n fid.close()\r\n break\r\n # read in the rest of the data\r\n self.table = pd.read_csv(dataFile, sep=sep, comment=\"#\",\r\n skipinitialspace=True, skiprows=rownum,\r\n skip_blank_lines=True)\r\n # set more parameters\r\n self.set_default_labels()\r\n\r\n def set_default_labels(self,cat=None):\r\n def copyvalue(meta, f, g, cat=None):\r\n if f not in meta.keys() and g in meta.keys():\r\n if isinstance(meta[g],list):\r\n meta[f] = cat.join(meta[g]) if cat is not None else \"\"\r\n else:\r\n meta[f] = meta[g]\r\n return(meta)\r\n self.meta = copyvalue(self.meta, 'xlabel','x', cat=cat)\r\n self.meta = copyvalue(self.meta, 'ylabel','y', cat=cat)\r\n try:\r\n self.meta = copyvalue(self.meta, 'zlabel','z', cat=cat)\r\n except:\r\n pass\r\n\r\n def parse_errorbar(self, df=None, simplify=True):\r\n \"\"\"Reorganize errorbar\"\"\"\r\n if df is None:\r\n df = self.table\r\n # find columns of errorbar data\r\n keys = list(self.meta.keys())\r\n # Function to get errobar\r\n def PE(p):\r\n out = df[list(p)]\r\n if out.ndim == 1: # including cases where 'error' column is specified\r\n return([np.array(out), np.array(out)])\r\n elif out.ndim == 2 and out.shape[-1] == 2:\r\n return(np.array(out).T)\r\n else: # fall thorugh, should not happen\r\n return(None)\r\n if 'error_pos' in keys and 'error_neg' in keys:\r\n P = np.array([self.meta['error_pos'], self.meta['error_neg']]).T\r\n elif 'error_pos' in keys:\r\n P = np.array([self.meta['error_pos']])\r\n elif 'error_neg' in keys:\r\n P = np.array([self.meta['error_neg']])\r\n P = [P] if P.ndim==1 else P\r\n out = [PE(p) for p in P]\r\n out = out[0] if len(out)==1 and simplify else out\r\n return(out)\r\n\r\n def parse_meta(self, line,metachar=\"|\"):\r\n \"\"\"Parse parameter\"\"\"\r\n line = line.replace(metachar,\"\")\r\n m, v = line.split(\"=\") # metavaraible, value\r\n m, v = m.strip(), v.strip()\r\n # parse value if it is a list\r\n if v.lower() == \"none\":\r\n self.meta[m] = None\r\n return\r\n if \"[\" in v and \",\" in v:\r\n v = v.replace(\"[\",\"\").replace(\"]\",\"\").split(\",\")\r\n v = [x.strip() for x in v]\r\n self.meta[m] = v\r\n # Force some default values\r\n if 'xlabel' not in self.meta.keys(): self.meta['xlabel'] = ''\r\n if 'ylabel' not in self.meta.keys(): self.meta['ylabel'] = ''\r\n\r\n def LoadNeuroData(self, dataFile, *args, **kwargs):\r\n self.Neuro2Trace(dataFile, *args, **kwargs)\r\n\r\n def Neuro2Trace(self, data, channels=None, streams=None, protocol=False,\r\n *args, **kwargs):\r\n \"\"\"Use NeuroData method to load and parse trace data to be plotted\r\n data: an instance of NeuroData, ro a list of instances\r\n channels: list of channels to plot, e.g. ['A','C','D']\r\n streams: list of data streams, e.g. ['V','C','S']\r\n protocol: load protocol to meta data. Default False.\r\n \"\"\"\r\n # Check instance\r\n if isinstance(data, NeuroData):\r\n data = [data] # convert to list\r\n elif isinstance(data, str): # file path\r\n data = [NeuroData(data, *args, **kwargs)]\r\n elif isinstance(data, list): # a list of objects\r\n for n, d in enumerate(data): # transverse through the list\r\n if isinstance(d, NeuroData): # a list of NeuroData instances\r\n pass\r\n elif isinstance(d,str): # a list of file paths\r\n data[n] = NeuroData(d, *args, **kwargs)\r\n else:\r\n raise TypeError((\"Unrecognized data type\"))\r\n else:\r\n raise TypeError((\"Unrecognized data type\"))\r\n\r\n # initialize notes, stored in stats attribute\r\n self.meta.update({'notes':[], 'xunit':[],'yunit':[],'x':[], 'y':[]})\r\n if protocol:\r\n self.meta.update({'protocol':[]})\r\n # file, voltage, current, channel, time\r\n notes = \"%s %.1f mV %d pA channel %s WCTime %s min\"\r\n self.table = []\r\n\r\n for n, d in enumerate(data): # iterate over all data\r\n series = pd.DataFrame() # initialize series data frame\r\n # Time data\r\n series['time'] = pd.Series(np.arange(0, d.Protocol.sweepWindow+\r\n d.Protocol.msPerPoint, d.Protocol.msPerPoint))\r\n self.meta['x'].append('time')\r\n self.meta['xunit'].append('ms') # label unit\r\n if protocol:\r\n self.meta['protocol'].append(d.Protocol)\r\n # iterate over all the channels\r\n avail_channels = [x[-1] for x in d.Protocol.channelNames]\r\n avail_streams = [x[:-1] for x in d.Protocol.channelNames]\r\n for c in self.listintersect(channels, avail_channels):\r\n # iterate over data streams\r\n for s in self.listintersect(avail_streams,streams):\r\n tmp = {'Volt': d.Voltage, 'Cur':d.Current, 'Stim': d.Stimulus}.get(s)\r\n if tmp is None or not bool(tmp):\r\n continue\r\n tmp = tmp[c]\r\n if tmp is None:\r\n continue\r\n series[s+c] = tmp # series[s, c]\r\n self.meta['y'].append(s+c) # .append((s,c))\r\n if s[0] == 'V':\r\n self.meta['yunit'].append('mV')\r\n elif s[0] == 'C':\r\n self.meta['yunit'].append('pA')\r\n else: #s[0] == 'S'\r\n self.meta['yunit'].append('pA')\r\n dtime = self.sec2hhmmss(d.Protocol.WCtime)\r\n # Notes: file, voltage, current, channel, time\r\n notesstr = notes %(d.Protocol.readDataFrom, \\\r\n d.Voltage[c][0], d.Current[c][0], c, dtime)\r\n self.meta['notes'].append(notesstr)\r\n self.table.append(series)\r\n\r\n # if only 1 data set in the input, output as a dataframe instead of a\r\n # list of dataframes\r\n self.table = self.table[0] if len(self.table)<2 else self.table\r\n # reshape y meta data\r\n #if len(self.meta['x'])!=len(self.meta['y']) and len(self.meta['x'])>1:\r\n self.meta['y']=np.reshape(self.meta['y'],(len(self.meta['x']),-1))\r\n self.meta['yunit'] = np.reshape(self.meta['yunit'],\\\r\n (len(self.meta['xunit']),-1))\r\n\r\n @staticmethod\r\n def listintersect(*args):\r\n \"\"\"Find common elements in lists\"\"\"\r\n args = [x for x in args if x is not None] # get rid of None\r\n def LINT(A,B): #short for list intersection\r\n return list(set(A) & set(B))\r\n if len(args) == 0:\r\n return(None)\r\n elif len(args) == 1:\r\n return(args[0])\r\n elif len(args) == 2:\r\n return(LINT(args[0],args[1]))\r\n else:\r\n newargs = tuple([LINT(args[0], args[1])]) + args[2:]\r\n return(listintersect(*newargs))\r\n\r\n @staticmethod\r\n def sec2hhmmss(sec):\r\n \"\"\"Converting seconds into hh:mm:ss\"\"\"\r\n m, s = divmod(sec, 60)\r\n h, m = divmod(m, 60)\r\n return(\"%d:%d:%0.1f\" % (h, m, s))\r\n\r\nclass FormatException(Exception):\r\n def __init___(self,dErrorArguments):\r\n print(dErrorArguments)\r\n Exception.__init__(self,\"Invalid cell label {0}. Format: Name.ddMMMyy, e.g. Cell A.02Jun15\".format(dErrorArguments))\r\n self.dErrorArguments = dErrorArguments\r\n\r\n# Implement a simple data loader with the assumption of current data structure\r\ndef get_cellpath(cell_label, episode='.{}', year_prefix='20'):\r\n \"\"\"Infer full path of the cell given cell label (without file extension)\r\n e.g. Neocortex A.09Sep15.S1.E13 should yield\r\n ./2015/09.September/Data 9 Sep 15/Neocortex A.09sep15.S1.E13.dat\"\"\"\r\n cell_label = cell_label.replace('.dat', '')\r\n\r\n if episode[0] != '.':\r\n episode = '.'+episode\r\n\r\n dinfo = re.findall('([\\w\\s]+).(\\d+)([a-z_A-Z]+)(\\d+).S(\\d+).E(\\d+)', cell_label)\r\n\r\n if not dinfo: # no episode\r\n dinfo = re.findall('([\\w\\s]+).(\\d+)([a-z_A-Z]+)(\\d+)', cell_label)\r\n else:\r\n episode = ''\r\n\r\n try:\r\n dinfo = dinfo[0]\r\n except:\r\n raise(FormatException(\"Invalid cell label {0}. Format: Name.ddMMMyy, e.g. Cell A.02Jun15\".format(cell_label)))\r\n\r\n # year folder\r\n year_dir = year_prefix + dinfo[3]\r\n # month folder\r\n month_dict = {'Jan':'01.January','Feb':'02.February','Mar':'03.March',\\\r\n 'Apr':'04.April','May':'05.May', 'Jun':'06.June', 'Jul':'07.July',\\\r\n 'Aug':'08.August','Sep':'09.September','Oct':'10.October',\\\r\n 'Nov':'11.November','Dec':'12.December'}\r\n month_dir = month_dict[dinfo[2]]\r\n # data folder\r\n data_folder = \"Data {:d} {} {}\".format(int(dinfo[1]), dinfo[2], year_dir)\r\n data_folder = os.path.join(year_dir, month_dir, data_folder, cell_label+episode+'.dat')\r\n data_folder = data_folder.replace('\\\\','/')\r\n return data_folder\r\n\r\ndef separate_cell_episode(cell_name):\r\n \"\"\"Convert full name of the file, e.g. 'NeocortexChRNBM E.27Jul17.S1.E10.dat', \r\n into tuple 'NeocortexChRNBM E.27Jul17', 'S1.E10' \r\n \"\"\"\r\n dinfo = re.findall('([\\w\\s-]+).(\\d+)([a-z_A-Z]+)(\\d+).S(\\d+).E(\\d+)', cell_name)[0]\r\n \r\n cell_label = dinfo[0]+\".\"+\"\".join(dinfo[1:4])\r\n episode_label = \"S{}.E{}\".format(dinfo[-2], dinfo[-1])\r\n \r\n return cell_label, episode_label\r\n\r\n\r\nclass ROI(object):\r\n \"\"\"Helper class for structuring ROIs\"\"\"\r\n def __init__(self, **kwargs):\r\n for key, value in kwargs.items():\r\n setattr(self, key, value)\r\n\r\nclass ROIData(list):\r\n def __init__(self, roifile=None, old=True, *args, **kwargs):\r\n self.roifile = roifile\r\n if roifile is not None:\r\n self.loadROI(roifile=roifile, old=old, *args, **kwargs)\r\n\r\n def loadROI(self, roifile, old=True, *args, **kwargs):\r\n if old:\r\n self.loadOldROIData(roifile, *args, **kwargs)\r\n\r\n def loadOldROIData(self, roifile, roitype='square'):\r\n fid = open(roifile, 'rb')\r\n fid.seek(4, 0)\r\n n = 0\r\n while n < 1000:\r\n # initialize\r\n roi = ROI(center=0, unknown1=0, size=0, unknown2=0, position=0)\r\n roi.center = np.fromfile(fid, np.int16, 2)\r\n if isempty(roi.center):\r\n return\r\n roi.unknown1 = np.fromfile(fid, np.int16, 1)\r\n roi.size = np.fromfile(fid, np.int16, 2)\r\n roi.unknwon2 = np.fromfile(fid, np.int16, 9)\r\n # Position of the square\r\n roi.position = np.fromfile(fid, np.int16, 4)\r\n roi.position = np.reshape(roi.position, (2,2))\r\n self.append(roi)\r\n n += 1\r\n\r\n # should in theory never reach this, but could indicate some problem\r\n if n>=1000:\r\n print('Maximum iteration exceeded. Loading only 1000 ROIs')\r\n\r\n\r\n\r\nif __name__ == '__main__' and True:\r\n# data = NeuroData(dataFile, old=True)\r\n# figdata = FigureData()\r\n # dataFile = 'C:/Users/Edward/Documents/Assignments/Scripts/Python/Plots/example/lineplot.csv'\r\n# figdata.Neuro2Trace(data, channels=['A','B','C','D'], streams=['Volt','Cur','Stim'])\r\n # data = FigureData(dataFile='D:/Data/2015/07.July/Data 2 Jul 2015/Neocortex C.02Jul15.S1.E40.dat',old=True, channels=['A'], streams=['Volt','Cur','Stim'])\r\n zData = NeuroData(dataFile='D:/Data/Traces/2015/07.July/Data 13 Jul 2015/Neocortex I.13Jul15.S1.E7.dat', old=True, infoOnly=True)\r\n # mData = ImageData(dataFile = 'D:/Data/2photon/2015/03.March/Image 10 Mar 2015/Neocortex D/Neocortex D 01/Neocortex D 01.512x512y1F.m21.img', old=True)\r\n # plt.imshow(mData.img[:,:,0])\r\n # zData = load_trace('Neocortex F.15Jun15.S1.E10')\r\n #roifile = 'C:/Users/Edward/Desktop/Slice B CCh Double.512x200y75F.m1.img.roi'\r\n #R = ROIData(roifile)\r\n"
},
{
"alpha_fraction": 0.6091557145118713,
"alphanum_fraction": 0.6370114684104919,
"avg_line_length": 27.69871711730957,
"blob_id": "0f1aacdc166918af6cb8fabf4732626058c4d86f",
"content_id": "ece34daba96395f5e0ceb650fe8fa31251bceb64",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 4631,
"license_type": "no_license",
"max_line_length": 80,
"num_lines": 156,
"path": "/python_tutorials/ThinkPython/practice_notes_5.py",
"repo_name": "sapphire008/Python",
"src_encoding": "UTF-8",
"text": "# Python 3.3.0 Practice Notes\r\n# Day 5: December 25, 2012\r\n\r\n# A tuple is a sequence of values.\r\n# Use parenthesis for tuples, though it is not necessary.\r\nt1 = ('a','b','c','d','e','f');\r\n#to make a tuple with a single element, parenthesis does not work. Use comma.\r\nt2 = 'a',; #<class 'tuple'>\r\nt3 = ('a'); #<class 'str'>\r\n#use tuple() to create a tuple (empty or not).\r\nt4 = tuple();#empty tuple\r\nt5 = tuple('lupins');#tuples of each element of the string\r\nprint(t5);\r\n#>>>('l', 'u', 'p', 'i', 'n', 's')\r\n#use bracket to index tuple\r\nt5[3];\r\n#>>>'i'\r\n# In contrast to list, tuples are immutable\r\nt5[3]='A';\r\n#>>>TypeError: 'tuple' object does not support item assignment\r\n#we can reassign the tuple with a new tuple\r\nt5=t5[:3]+('A',)+t5[4:];\r\nprint(t5);\r\n#>>>('l', 'u', 'p', 'A', 'n', 's')\r\n\r\n# Tuple Assignments\r\nemail_addr='[email protected]';\r\nuName,domName=email_addr.split('@');#splitting the string at '@'\r\nprint(uName);\r\n#>>>monty\r\nprint(domName);\r\n#>>>python.org\r\n\r\n# Tuple as return values\r\nt=divmod(7,3);\r\nprint(t);\r\n#>>>(2,1) #(quotient, remainder)\r\n#we may also do\r\nquot,remd=divmod(7,3);\r\nprint(quot);\r\n#>>>2\r\nprint(remd);\r\n#>>>1\r\n# An example function that returns tuple\r\ndef min_max(t):\r\n return min(t),max(t);\r\n\r\nt=(1,2,3,4,5,6,7,8);\r\nt_min,t_max=min_max(t);\r\nprint(t_min);\r\n#>>>1\r\nprint(t_max);\r\n#>>>8\r\n\r\n#'*' in front of the parameter: gather or scatter\r\n#gather: takes arbitrarily many arguments and do commands with all of them\r\ndef printAll(*arg):\r\n print arg; #print every single input arguments\r\n\r\n#scatter: given one argument (e.g. tuple), separate them to fit what the command\r\n#requires\r\nt=(7,3);\r\ndivmod(t);\r\n#>>>TypeError: divmod expected 2 arguments, got 1\r\ndivmod(*t);\r\n#>>>(2,1)\r\n\r\ndef sumAll(*args): #this should gather all the args into a tuple\r\n return sum(args); #sums a tuple\r\n\r\nsumAll(2,3,4,5,6,2,3,4,1);\r\n#>>>30\r\n\r\n# List and tuples\r\n#zip() combines multiple sequences into a list of tuples\r\ns='abc';\r\nt=[0,1,2,3,4];\r\nz=zip(s,t);#note the returned list has length of the shorter sequence\r\nprint(z);\r\n#supposedly, it looks like the following, but Python 3 does not print like this\r\n#[('a',0),('b',1),('c',2)] -->Python2\r\n#<zip object at 0x0000000002FBC5C8> -->Python3\r\nfor letter,number in z:\r\n print(letter,number);\r\n\r\n#>>>\r\n#a 0\r\n#b 1\r\n#c 2\r\n\r\n#to transverse the elements and indices a sequence, use enumerate()\r\nfor index, element in enumerate('abc'):\r\n print(index,element);\r\n#>>>\r\n#0 a\r\n#1 b\r\n#2 c\r\n\r\n# Dictionaries and tuples\r\n#.items() method of dictionaries returns a list of tuples, where each element\r\n#of the tuple is a (key,value) pair\r\nd={'a':1,'b':2,'c':3,'d':4};\r\nt=d.items();\r\nprint(t);\r\n#>>>dict_items([('d', 4), ('b', 2), ('c', 3), ('a', 1)])\r\n#in fact, this 'dict_items' is called a iterator, but it behaves like a list,\r\n#and we may convert this into a list by doing list(d.items())\r\n\r\n#create a dictionary of (string,index)\r\nd=dict(zip('asdfgh',range(len('asdfgh'))));\r\nprint(d);\r\n#>>>{'h': 5, 'f': 3, 'g': 4, 'd': 2, 's': 1, 'a': 0}\r\n\r\n#.update() method of dictionary adds a list of tuples to the dictionary\r\nd.update([('z',7),('m',9)]);\r\n\r\n#use tuples as keys of a dictionary\r\nd.clear();#clear all the items in the dictionary\r\nlastName=['Smith','Wang','Lee','Allen','Georgeton','Schuman'];\r\nfirstName=['John','Julie','Thomas','Nich','Busk','Henry'];\r\nphoneNum=['626','232','888','333','123','999'];\r\n\r\nd=dict();\r\nfor i in range(0,len(lastName)):\r\n d[lastName[i],firstName[i]]=phoneNum[i];\r\n \r\n# Tuple comparison\r\n#tuple compares the first elements of each tuple, if tie, go to the next one\r\n#sorting words from shortes to the longest\r\ndef sort_by_length(words_list):\r\n l=list();#empty list for the sorted words\r\n for word in words_list:\r\n l.append((len(word),word));\r\n \r\n l.sort(reverse=True);#'reverse=True' make sure sorting in descending order\r\n sorted_list=[];\r\n for wl,wd in l:\r\n sorted_list.append(wd);\r\n \r\n return sorted_list;\r\n\r\nword_list=['adds','vista','banana','fda','joke'];\r\nafter_sort=sort_by_length(word_list);\r\nprint(after_sort);\r\n#>>>['banana', 'vista', 'joke', 'adds', 'fda']\r\n#note that 'joke' and 'adds' have the same length. It will sort by the second\r\n#element of the tuple, which are the words. Since 'j' comes after 'a', and\r\n#we specified to sort by descending order, 'joke' comes before 'adds'\r\n\r\n# When to use tuple\r\n#1) when trying to return a list of parameters in a function\r\n#2) when required using an immutable sequence, for instance, creating the key\r\n#of a dictionary (can also use strings)\r\n#3) when passing a sequence to a function to avoid aliasing\r\n\r\n#This concludes today's study."
},
{
"alpha_fraction": 0.6661429405212402,
"alphanum_fraction": 0.6789735555648804,
"avg_line_length": 40.510868072509766,
"blob_id": "9e215ca68f122013b4b6a28bae420056f6114a97",
"content_id": "024a31c40949c0ae926d5226cfca3b2023a63a05",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 3819,
"license_type": "no_license",
"max_line_length": 92,
"num_lines": 92,
"path": "/fMRI_pipeline/space_time_realign_1st_draft.py",
"repo_name": "sapphire008/Python",
"src_encoding": "UTF-8",
"text": "#!/usr/bin/env python\n# emacs: -*- mode: python; py-indent-offset: 4; indent-tabs-mode: nil -*-\n# vi: set ft=python sts=4 ts=4 sw=4 et:\n\"\"\"\nThis script requires the nipy-data package to run. It is an example of\nsimultaneous motion correction and slice timing correction in\nmulti-session fMRI data from the FIAC 2005 dataset. Specifically, it\nuses the first two sessions of subject 'fiac0'.\n\nUsage:\n python space_time_realign.py\n\nTwo images will be created in the working directory for the realigned series::\n\n rarun1.nii\n rarun2.nii\n\nAuthor: Alexis Roche, 2009.\n\"\"\"\nPYTHONPKGPATH = '/hsgs/projects/jhyoon1/pkg64/pythonpackages/'\n\n#from __future__ import print_function # Python 2/3 compatibility\nimport sys,os\nsys.path.append(os.path.join(PYTHONPKGPATH,'nibabel-1.30'))\nimport nibabel# required for nipy\nsys.path.append(os.path.join(PYTHONPKGPATH,'nipy-0.3'))\nimport numpy as np\nfrom nipy.algorithms.registration import SpaceTimeRealign\nfrom nipy.algorithms.slicetiming import timefuncs\nfrom nipy import load_image, save_image\n\n# Input images\ndef space_time_realign(Images,TR=2,numslices=None,SliceTime='asc_alt_2',RefScan=None):\n '''\n 4D simultaneous slice timing and spatial realignment. Adapted from\n Alexis Roche's example script, and extend to be used for multiplex\n imaging sequences\n \n Inputs:\n \n Images: list of images, input as a list of strings\n \n numslices: for non-multiplex sequence, default to be the number of\n slices in the image. For multiplex sequence, enter as a tuple,\n such that the first element is the number of planes acquired in\n parallel between each other, and the second element is the number\n of slices of each parallel plane/slab\n \n SliceTime:enter as a string to specify how the slices are ordered.\n Choices are the following\n 1).'ascending': sequential ascending acquisition\n 2).'descending': sequential descending acquisition\n 3).'asc_alt_2': ascending interleaved, starting at first slice\n 4).'asc_alt_2_1': ascending interleaved, starting at the second\n slice\n 5).'desc_alt_2': descending interleaved, starting at last slice\n 6).'asc_alt_siemens': ascending interleaved, starting at the first\n slice if odd number of slices, or second slice if even number\n of slices\n 7).'asc_alt_half': ascending interleaved by half the volume\n 8).'desc_alt_half': descending interleaved by half the volume\n \n RefScan: reference volume for spatial realignment movement estimation\n '''\n \n # load images \n runs = [load_image(run) for run in Images]\n # parse data info\n if numslices is None:\n numslices = runs[0].shape[2]\n numplanes = 1\n elif isinstance(numslices,tuple):\n numslices = numslices[0]\n numplanes = numplanes[1]\n # parse slice timing according to the input\n slice_timing = getattr(timefuncs,SliceTime)(TR,numslices)\n #repeat the slice timing for multiplex seqquence\n slice_timing = np.tile(slice_timing,numplanes)\n # Spatio-temporal realigner assuming interleaved ascending slice order\n R = SpaceTimeRealign(runs, tr=TR, slice_times=slice_timing, slice_info=2,\n affine_class='Rigid')\n \n print('Slice times: %s' % slice_timing)\n # Estimate motion within- and between-sessions\n R.estimate(refscan=RefScan)\n # Resample data on a regular space+time lattice using 4d interpolation\n print('Saving results ...')\n for i in range(len(runs)):\n corr_run = R.resample(i)\n fname = os.path.join(os.path.split(Images[i])[0],'ra' + os.path.split(Images[i])[1])\n save_image(corr_run, fname)\n print(fname)\n"
},
{
"alpha_fraction": 0.42230933904647827,
"alphanum_fraction": 0.5035638213157654,
"avg_line_length": 33.15853500366211,
"blob_id": "b4ca5274737e87111eae1ec101dded57fbd418c3",
"content_id": "be2dedd3fe713868ca6cb48cd2cb56d8f2ec53de",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 2806,
"license_type": "no_license",
"max_line_length": 79,
"num_lines": 82,
"path": "/Spikes/spikedetekt2/experimental/test1.py",
"repo_name": "sapphire008/Python",
"src_encoding": "UTF-8",
"text": "\"\"\"Test script illustrating how to run spikedetekt on a .dat file.\"\"\"\n\n# -----------------------------------------------------------------------------\n# Imports\n# -----------------------------------------------------------------------------\nimport os\nimport sys\n\nimport numpy as np\nimport matplotlib.pyplot as plt\n\nfrom spikedetekt2 import *\n\nDIRPATH = 'data'\nfilename = 'dat1s'\n\nsample_rate = 20000\nduration = 1.\nnchannels = 32\nchunk_size = 20000\nnsamples = int(sample_rate * duration)\nraw_data = 'data/dat1s.dat'\n\nprm = get_params(**{\n 'nchannels': nchannels,\n 'sample_rate': sample_rate,\n 'chunk_size': chunk_size,\n 'detect_spikes': 'negative',\n})\nprb = {'channel_groups': [\n {\n 'channels': range(nchannels),\n 'graph': [\n [0, 1], [0, 2], [1, 2], [1, 3], [2, 3], [2, 4],\n [3, 4], [3, 5], [4, 5], [4, 6], [5, 6], [5, 7],\n [6, 7], [6, 8], [7, 8], [7, 9], [8, 9], [8, 10],\n [9, 10], [9, 11], [10, 11], [10, 12], [11, 12], [11, 13],\n [12, 13], [12, 14], [13, 14], [13, 15], [14, 15], [14, 16],\n [15, 16], [15, 17], [16, 17], [16, 18], [17, 18], [17, 19],\n [18, 19], [18, 20], [19, 20], [19, 21], [20, 21], [20, 22],\n [21, 22], [21, 23], [22, 23], [22, 24], [23, 24], [23, 25],\n [24, 25], [24, 26], [25, 26], [25, 27], [26, 27], [26, 28],\n [27, 28], [27, 29], [28, 29], [28, 30], [29, 30], [29, 31],\n [30, 31]\n ],\n }\n]}\n\n# Delete the files if the script is called with \"reset\" option.\nif 'reset' in sys.argv:\n if files_exist(filename, dir=DIRPATH):\n delete_files(filename, dir=DIRPATH)\n\n# Create empty files if they do not exist yet.\nif not files_exist(filename, dir=DIRPATH):\n create_files(filename, dir=DIRPATH, prm=prm, prb=prb)\n \n # Open the files.\n files = open_files(filename, dir=DIRPATH, mode='a')\n \n # Add data.\n add_recording(files, \n sample_rate=sample_rate,\n nchannels=nchannels)\n add_cluster_group(files, channel_group_id='0', id='noise', name='Noise')\n add_cluster(files, channel_group_id='0',)\n \n # Close the files\n close_files(files)\n \n# Open the files in writing mode and run SpikeDetekt\nwith Experiment(filename, dir=DIRPATH, mode='a') as exp:\n run(raw_data, experiment=exp, prm=prm, probe=Probe(prb))\n\n# Open the file in read-only mode to look into it.\nwith Experiment(filename, dir=DIRPATH) as exp:\n print \"spikes:\", len(exp.channel_groups[0].spikes)\n fm = exp.channel_groups[0].spikes.features_masks\n wf = exp.channel_groups[0].spikes.waveforms_filtered\n \n print \"Features & masks:\", fm.shape\n print \"Waveforms:\", wf.shape\n \n"
},
{
"alpha_fraction": 0.6708229184150696,
"alphanum_fraction": 0.6807979941368103,
"avg_line_length": 58.150001525878906,
"blob_id": "5007335df54787738045def62d3fd533bba60446",
"content_id": "144b87b77b7b5528f3794330ccc26c281fd701f8",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 1203,
"license_type": "no_license",
"max_line_length": 111,
"num_lines": 20,
"path": "/PySynapse/archive/layoutwidget_gridlayout.py",
"repo_name": "sapphire008/Python",
"src_encoding": "UTF-8",
"text": "# Initialize the layout widget\r\n widgetFrame = QtGui.QFrame(self)\r\n widgetFrame.setLayout(QtGui.QGridLayout())\r\n widgetFrame.layout().setSpacing(10)\r\n widgetFrame.setObjectName(_fromUtf8(\"LayoutWidgetFrame\"))\r\n all_streams = sorted(set([l[0] for l in all_layouts]))\r\n all_streams = [s for s in ['Voltage', 'Current','Stimulus'] if s in all_streams]\r\n all_channels = sorted(set([l[1] for l in all_layouts]))\r\n # Layout setting table\r\n self.setLayoutTable(all_streams, all_channels)\r\n # Buttons for adding and removing channels and streams\r\n addButton = QtGui.QPushButton(\"Add\") # Add a channel\r\n addButton.clicked.connect(lambda: self.addLayoutRow(all_streams=all_streams, all_channels=all_channels))\r\n removeButton = QtGui.QPushButton(\"Remove\") # Remove a channel\r\n removeButton.clicked.connect(self.removeLayoutRow)\r\n # Add the exisiting channels and streams to the table\r\n widgetFrame.layout().addWidget(addButton, 1, 0)\r\n widgetFrame.layout().addWidget(removeButton, 1, 1)\r\n widgetFrame.layout().addWidget(self.layout_table, 2, 0, self.layout_table.rowCount(), 2)\r\n return widgetFrame\r\n"
},
{
"alpha_fraction": 0.4545454680919647,
"alphanum_fraction": 0.5530303120613098,
"avg_line_length": 10.800000190734863,
"blob_id": "12dda35512b79a393f201a4f85b81034c8a4be24",
"content_id": "f31994474c97848d24b36bca5ad13096d4aecc26",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 132,
"license_type": "no_license",
"max_line_length": 35,
"num_lines": 10,
"path": "/generic/Arduino.py",
"repo_name": "sapphire008/Python",
"src_encoding": "UTF-8",
"text": "# -*- coding: utf-8 -*-\r\n\"\"\"\r\nCreated on Fri Jun 05 00:57:19 2015\r\n\r\n@author: Edward\r\n\"\"\"\r\n\r\n\r\nclass \r\ndef ArduinoPortWrite():\r\n "
},
{
"alpha_fraction": 0.7263681888580322,
"alphanum_fraction": 0.7426754832267761,
"avg_line_length": 51.599998474121094,
"blob_id": "b96d3b1efbf4d0898a6bdbf393996421dc2b541c",
"content_id": "15ed3f8bacd77242212a011c4c9194193651ee53",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Markdown",
"length_bytes": 7236,
"license_type": "no_license",
"max_line_length": 642,
"num_lines": 135,
"path": "/PySynapse/README.md",
"repo_name": "sapphire008/Python",
"src_encoding": "UTF-8",
"text": "# PySynapse\r\n\r\nAn interactive utility for electrophyiological data analyses.\r\n\r\n\r\n\r\n## Script structure\r\n\r\n* `SynapseQt.py`: main window\r\n* `app`: other apps / windows\r\n - `app/Scope.py`: window for trace display\r\n - `app/AccordionWidget.py`: a class for designing side dock panel toolbox\r\n - `app/Mirage.py`: window for image display\r\n - `app/Annotation.py`: additional GUI interface for creating annotation objects\r\n\r\n* `util`: utility functions\r\n - `util/ImportData`: data reading utilities\r\n - `util/ExportData`: export figures\r\n\r\n* `resources`: icons, fonts, etc.\r\n\r\n**Planned features of Mirage window**\r\n* Display a stack as movie\r\n* Display the Maximum Pixel Intensity image\r\n* dF/F trace\r\n\r\n#########################################################################\r\n1. Dependencies:\r\n - numpy\r\n - pandas\r\n - PyQt5\r\n - pyqtgraph: for data display (trace and image)\r\n - matplotlib: for exporting figures\r\n\r\n2. To-dos:\r\n - Integrate Ben's clipboard program to make .ini files (Export .ini file)\r\n - Export matplotlib figure to Bokeh for more interactive display.\r\n\r\n## Update Jun 29, 2018\r\n* Now uses PyQt5\r\n* TODO: need to fix the problem with annotation square where the border of the box still shows even after unchecking the \"line\" option\r\n\r\n## Update Jun 4, 2018\r\n* Fixed some bugs\r\n* Added utility to read .csv file, e.g. exported from SQL database query\r\n* Added utility to show detected events, along with exporting the drawn ticks of events to figures\r\n\r\n## Update Sep 6, 2017\r\n* Added trace \"**Filter**\" tool\r\n* Fixed bugs in \"**Arithmetic**\" tool\r\n* Fixed bugs in \"**Annotation**\" tool and implemented more functions\r\n\r\n## Update Apr 11, 2017\r\n* Fixed bug when exporting figures, scalebar labels are being drawn twice\r\n* Fixed import errors / bugs after separating Toolbox side dock\r\n\r\n## Update Apr 1, 2017\r\n* Started on annotation widget.\r\n - Able to add and delete the annotation items now.\r\n - Still need to implement the actual drawing on PyQtGraph window and export utilities in matplotlib\r\n\r\n## Update Oct 31, 2016\r\n* Added detection of cell attached spikes\r\n\r\n## Update Sep 29, 2016\r\n* Added functionality to export traces arranged horizontally; good for experiments acquired over several episodes and to be viewed as a whole --> will also add horizontal arrangement for pyqtgraph Scope window as well in the future.\r\n* Added setting options specific to horizontal plot exports --> but need to group them better in the future.\r\n\r\n## Update Sep 10, 2016\r\n* Added settings window. Default settings are saved under ./resouces/config.ini and interfaced by GUI under the main synapse window: File/Settings.\r\n - For now, implemented settings for exporting traces. Planned to extend the new settings function to other aspects of the program\r\n - Extended some functions in trace export\r\n* Set icons for each app window\r\n* Corrected some typos\r\n\r\n## Update Aug 20, 2016\r\n* Added arithmetic tool to calculate traces (averages, subtractions)\r\n* Added curve fitting tools to fit polynomials, exponentials (3 equations), power law (2 equations)\r\n* Fixed some bugs on exporting figures\r\n* Changed to PySynapse ver 0.3\r\n\r\n## Update Jun 11, 2016\r\n* Added \"Arithmetic\", \"Layout\", and \"Event\" tools in the toolbox\r\n - \"Arithmetic\": remove baseline (\"null\" checkbox) and trace averaging / manipulation (to be implemented)\r\n - \"Layout\": add and remove data streams\r\n - \"Event\": event detector, including APs, PSPs. Extracellular spike detection is yet to be implemented\r\n* Fixed various bugs and fine tuned some behaviors.\r\n\r\n## Update Apr 24, 2016\r\n* Side dock panel toolbox\r\n - Added \"Channel\" toolbox. Now can add and remove data channel at ease.\r\n - Started \"Analysis\" toolbox. Need to write the corresponding analysis functions first.\r\n\r\n## Update Apr 9, 2016\r\n* Export multiple traces using matplotlib; only 'overlap' configuration is fully working\r\n* Exported traces can be color coded, if the user turn on \"colorfy\" option in the Scope window; Colors cycles use tableau10\r\n* Files are exported as .eps, and font set to Helvetica, using a .ttf file under `./resources`; fontsize=12.0. This should be cross-platform, as it does not depend on the system's font repository. It should also be editable in vector graphics editor. From experience, if using 'Arial' font in Windows, the font header in the .eps file cannot be recognized by editors, and the entire graphics cannot be imported successfully (true for InkScape and CorelDraw)\r\n\r\n## Update Mar 20, 2016\r\n* Improved file system interface\r\n - Folders and files with numbers now sort intuitively to human reading\r\n - Fixed network drive volume info query\r\n - Fixed file system horizontal scroll\r\n* Scope window:\r\n - Allow setting view range of traces\r\n* Exporting:\r\n - Allow exporting the traces to a .eps file. For now, can only export from the same cell. A restructure would be needed for other cells.\r\n - Yet to implement the actual plotting and exporting part. Has set up the hooks.\r\n\r\n## Update Mar 13, 2016\r\n* Scope window is fully functional now.\r\n - Plot traces with or without color. Colors usage are tracked correctly. Colors are drawn from tableau10.\r\n - Plot traces of multiple channels of data, with time domain linked\r\n - Plot traces of multiple episodes of data, correctly distribute them across channels of data\r\n* Added toggle functionality in both main and scope windows\r\n - Allow toggle of additional columns of episode list tableview\r\n - Allow toggle of side panel of scope window\r\n - Allow toggle between colored traces and black traces\r\n\r\n## Update Mar 6, 2016\r\n* The table view is fully functional. Clicking on the table selects a row. Multiple rows can be selected by dragging along the rows, by clicking while holding Ctrl, or by holding SHIFT.\r\n* Each selection will highlight with a preferred, custom, blue color.\r\n* Clicking the episode will spin up the Scope window. By tracking the history of clicking (from the previous state), it is possible to load up the traces faster.\r\n\r\n## Update Feb 20, 2016\r\n* Now in Windows system, at startup, the program will list all the drives more efficiently via wmic\r\n * In this implementation, I addressed potentially problematic X:\\ drives. When X:\\ was mounted but disconnected due to network problem, the Windows system still register it as an active drive, but unable to get volume information. It will takes 10s of seconds before it return an error. With this implementation, I set out a timeout of 2 seconds from subprocess calling to inquire volume name information. Upon encountering disconnected X:\\ drive, wmic volume will return as not available very quickly. To further safeguard and reduce startup time, if wmic call takes more than 2 seconds to inquire volume name, it will give up the inquiry.\r\n\r\n## Update Feb 13, 2016\r\n* Changed to Version PySynapse 0.2\r\n* Reimplemented a custom FileSystemModel to allow insertion of custom rows, using QAbstractitemModel. Original attempt using QFileSystemModel was unsuccessful and complicated.\r\n* Default startup directory depends on operating system.\r\n * Windows: list all the drives, like default QFileSystemModel\r\n * Mac: /Volumes\r\n * Linux: /\r\n"
},
{
"alpha_fraction": 0.7099447250366211,
"alphanum_fraction": 0.7182320356369019,
"avg_line_length": 37.105262756347656,
"blob_id": "e0c8208508ae6daddee650ecf9a0f8c82f3d8667",
"content_id": "8c69aa593ffdc9d70b2e5ac6733604e0c6bdb3ea",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Markdown",
"length_bytes": 724,
"license_type": "no_license",
"max_line_length": 176,
"num_lines": 19,
"path": "/README.md",
"repo_name": "sapphire008/Python",
"src_encoding": "UTF-8",
"text": "Python\n==============================================================\n\nPython scripts for general purposes, data analysis, and plotting.\n\n**Under development**\n\n# Current list of packages #\n\n## General purpose utilities ##\n1. `generic`: generic functions and routines\n2. `ReadNWrite`: Python interface with Excel spreadsheet\n\n## fMRI utilities ##\n1. `fMRI_pipeline`: adpating some steps of fMRI processing and analysis written in Python\n\n## Plotting / visualization utilities ##\n1. `Plots`: generating publication quality plots using `matplotlib`\n2. `PySynapse`: PyQt4 and pyqtgraph implementation of the original Synapse program, used in Strowbridge Lab for electrophysiological data visualization and preliminary analyses\n"
},
{
"alpha_fraction": 0.6012335419654846,
"alphanum_fraction": 0.6128891110420227,
"avg_line_length": 46.79857635498047,
"blob_id": "5e4855f098e6be889de5462b405d4eed990752b6",
"content_id": "9ca27c2029475fc8c103392388164e39b08cf7ba",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 20591,
"license_type": "no_license",
"max_line_length": 133,
"num_lines": 422,
"path": "/PySynapse/app/Settings.py",
"repo_name": "sapphire008/Python",
"src_encoding": "UTF-8",
"text": "# -*- coding: utf-8 -*-\r\n\"\"\"\r\nCreated on Sat Sep 10 14:42:35 2016\r\n\r\nSettings window\r\n\r\nInterfrace for settings\r\nRead and write ./resouces/config.ini text file\r\n\r\n@author: Edward\r\n\"\"\"\r\n\r\nimport sys\r\nimport os\r\nimport fileinput\r\nfrom PyQt5 import QtCore, QtGui, QtWidgets\r\n\r\n__location__ = os.path.realpath(os.path.join(os.getcwd(), os.path.dirname(__file__)))\r\n\r\nsys.path.append(os.path.join(__location__, '..')) # for debug only\r\nfrom util.MATLAB import *\r\n\r\n\r\ntry:\r\n _fromUtf8 = QtCore.QString.fromUtf8\r\nexcept AttributeError:\r\n def _fromUtf8(s):\r\n return s\r\n\r\ntry:\r\n _encoding = QtGui.QApplication.UnicodeUTF8\r\n def _translate(context, text, disambig):\r\n return QtCore.QCoreApplication.translate(context, text, disambig, _encoding)\r\nexcept AttributeError:\r\n def _translate(context, text, disambig):\r\n return QtCore.QCoreApplication.translate(context, text, disambig)\r\n\r\n# <editor-fold desc=\"Global Settings\">\r\n# ------------ Read and write the settings file ----------------------------------\r\ndef readini(iniPath):\r\n \"\"\"Read the saved config.ini for previous settings\"\"\"\r\n options = dict()\r\n with open(iniPath, 'r') as f: # read only\r\n # read line by line\r\n for line in f:\r\n if line[0] == '#': # skip comment line\r\n continue\r\n elif \"#\" in line: # has both comments and hex vlaues\r\n params, comments = line.rsplit('#', 1) # try and see if the comments in the last hash\r\n if (\"]\" in comments and \"[\" in params) or \\\r\n (\")\" in comments and \"(\" in params) or \\\r\n (\"}\" in comments and \"{\" in params) :\r\n pass # don't split if the line has more hash, has grouping elements\r\n else:\r\n line = params\r\n\r\n # Separate the line by '='\r\n key, val = [k.strip() for k in line.split('=')]\r\n # Check if the current line has a list\r\n if \"[\" in val and \"]\" in val: # remove the brackets\r\n val = val[val.find(\"[\")+1:val.find(\"]\")].strip()\r\n val = [v.replace(\"'\",\"\").replace('\"','').strip() for v in val.split(\",\")]\r\n # check if val is numeric string\r\n try:\r\n val = str2numeric(val)\r\n except:\r\n pass\r\n \r\n # check if val is boolean\r\n if isinstance(val, str):\r\n if val.lower() == 'true':\r\n val = True\r\n elif val.lower() == 'false':\r\n val = False\r\n \r\n options[key] = val\r\n \r\n if not f.closed:\r\n f.close()\r\n\r\n return options\r\n \r\ndef writeini(iniPath, options):\r\n \"\"\"Write the config.ini to save the current settings\"\"\"\r\n with fileinput.input(iniPath, inplace=True) as f:\r\n for line in f:\r\n if line[0] == '#' or line.strip() == '': # comment or empty line\r\n print(line, end='') # this actually writes the line to the file\r\n continue\r\n elif \"#\" in line:\r\n params, comments = line.rsplit('#', 1) # try and see the comments in the last hash\r\n if (\"]\" in comments and \"[\" in params) or \\\r\n (\")\" in comments and \"(\" in params) or \\\r\n (\"}\" in comments and \"{\" in params) :\r\n # don't split if the line has more hash, has grouping elements\r\n params, comments = line.strip(), ''\r\n else:\r\n params = params.strip()\r\n comments = '#'+comments\r\n else: # no comments\r\n params, comments = line.strip(), ''\r\n \r\n # parse which key of the option dictionary in the current params\r\n for k, v in options.items():\r\n if k == params.split('=')[0].strip():\r\n writeStr = '{} = {} {}'.format(k, str(v), comments).strip()\r\n print(writeStr) # this actually writes the line to the file\r\n break\r\n\r\n# ------------ Settings widget ---------------------------------------------------\r\nclass Settings(QtWidgets.QWidget):\r\n def __init__(self, parent=None, iniPath=None):\r\n super(Settings, self).__init__(parent)\r\n self.setWindowTitle(\"Settings\")\r\n self.setWindowIcon(QtGui.QIcon('resources/icons/setting.png'))\r\n self.isclosed = True\r\n if iniPath == None:\r\n self.iniPath = os.path.join(__location__,'../resources/config.ini')\r\n else:\r\n self.iniPath = iniPath\r\n # Get the options: sets self.options\r\n self.options = readini(iniPath=self.iniPath)\r\n self.settingDict = {} # map between field name and objects that stores the setting\r\n # Set up the GUI\r\n self.setLayout(QtWidgets.QVBoxLayout())\r\n self.tabWidget = QtWidgets.QTabWidget()\r\n\r\n # Adding tabs\r\n self.tabWidget.addTab(self.exportTraceTabUI(),\"Export\") \r\n self.tabWidget.addTab(self.viewTabUI(), 'View')\r\n\r\n # buttons for saving the settings and exiting the settings window\r\n OK_button = QtWidgets.QPushButton('OK')\r\n OK_button.setDefault(True)\r\n OK_button.clicked.connect(lambda: self.updateSettings(closeWidget=True))\r\n Apply_button = QtWidgets.QPushButton('Apply')\r\n Apply_button.clicked.connect(lambda: self.updateSettings(closeWidget=False))\r\n Cancel_button = QtWidgets.QPushButton('Cancel')\r\n Cancel_button.clicked.connect(self.close)\r\n self.buttonGroup = QtWidgets.QGroupBox()\r\n self.buttonGroup.setLayout(QtWidgets.QHBoxLayout())\r\n self.buttonGroup.layout().addWidget(OK_button, 0)\r\n self.buttonGroup.layout().addWidget(Apply_button, 0)\r\n self.buttonGroup.layout().addWidget(Cancel_button, 0)\r\n \r\n self.layout().addWidget(self.tabWidget)\r\n self.layout().addWidget(self.buttonGroup)\r\n\r\n #--------------- Set up the GUI ----------------------------------------------\r\n def exportTraceTabUI(self):\r\n widgetFrame = QtWidgets.QFrame()\r\n widgetFrame.setLayout(QtWidgets.QVBoxLayout())\r\n sizePolicy = QtWidgets.QSizePolicy(QtWidgets.QSizePolicy.Preferred, QtWidgets.QSizePolicy.Preferred)\r\n sizePolicy.setHorizontalStretch(0)\r\n widgetFrame.setSizePolicy(sizePolicy)\r\n widgetFrame.setObjectName(_fromUtf8(\"ExportTraceWidgetFrame\"))\r\n\r\n # %% Size\r\n fig_size_W_label = QtWidgets.QLabel('Width (inches)')\r\n fig_size_W_text = QtWidgets.QLineEdit(str(self.options['figSizeW']))\r\n self.settingDict['figSizeW'] = fig_size_W_text\r\n fig_size_W_checkBox = QtWidgets.QCheckBox('Dynamically Adjust Width')\r\n fig_size_W_checkBox.setToolTip('Dynamically adjust the width of the figure when exporting multiple episodes')\r\n fig_size_W_checkBox.setCheckState(2 if self.options['figSizeWMulN'] else 0)\r\n self.settingDict['figSizeWMulN'] = fig_size_W_checkBox\r\n \r\n fig_size_H_label = QtWidgets.QLabel('Height (inches)')\r\n fig_size_H_text = QtWidgets.QLineEdit(str(self.options['figSizeH']))\r\n self.settingDict['figSizeH'] = fig_size_H_text\r\n fig_size_H_checkBox = QtWidgets.QCheckBox('Dynamically Adjust Height')\r\n fig_size_H_checkBox.setToolTip('Dynamically adjust the width of the figure when exporting multiple episodes')\r\n fig_size_H_checkBox.setCheckState(2 if self.options['figSizeHMulN'] else 0)\r\n self.settingDict['figSizeHMulN'] = fig_size_H_checkBox\r\n\r\n\r\n size_groupBox = QtWidgets.QGroupBox(\"Size\")\r\n size_groupBox.setLayout(QtWidgets.QGridLayout())\r\n size_groupBox.layout().addWidget(fig_size_W_label, 0, 0, 1, 1)\r\n size_groupBox.layout().addWidget(fig_size_W_text, 0, 1, 1, 1)\r\n size_groupBox.layout().addWidget(fig_size_W_checkBox, 0, 2, 1, 2)\r\n size_groupBox.layout().addWidget(fig_size_H_label, 1, 0, 1, 1)\r\n size_groupBox.layout().addWidget(fig_size_H_text, 1, 1, 1, 1)\r\n size_groupBox.layout().addWidget(fig_size_H_checkBox, 1, 2, 1, 2)\r\n\r\n # %% Concatenated\r\n hSpace_label = QtWidgets.QLabel('Horizontal Space')\r\n hSpace_label.setToolTip('Only relevant when concatenating series of traces')\r\n hSpace_spinbox = QtWidgets.QSpinBox()\r\n hSpace_spinbox.setValue(self.options['hFixedSpace'])\r\n hSpace_spinbox.setSuffix('%')\r\n hSpace_spinbox.setRange(0,100)\r\n hSpace_spinbox.setFixedWidth(50)\r\n hSpace_comboBox = QtWidgets.QComboBox()\r\n hSpace_comboList = ['Fixed', 'Real Time']\r\n hSpace_comboBox.addItems(hSpace_comboList)\r\n hSpace_comboBox.setCurrentIndex(hSpace_comboList.index(self.options['hSpaceType']))\r\n hSpace_comboBox.currentIndexChanged.connect(lambda: self.toggleHFixedSpace(hSpace_comboBox, hSpace_spinbox, 'Real Time'))\r\n if hSpace_comboBox.currentText() == 'Real Time':\r\n hSpace_text.setEnabled(False)\r\n self.settingDict['hSpaceType'] = hSpace_comboBox\r\n # self.settingDict['hFixedSpace'] = hSpace_text\r\n self.settingDict['hFixedSpace'] = hSpace_spinbox\r\n \r\n concat_groupBox = QtWidgets.QGroupBox('Concatenated')\r\n concat_groupBox.setLayout(QtWidgets.QGridLayout())\r\n concat_groupBox.layout().addWidget(hSpace_label, 0, 0, 1,1)\r\n concat_groupBox.layout().addWidget(hSpace_comboBox, 0, 1, 1,1)\r\n concat_groupBox.layout().addWidget(hSpace_spinbox, 0, 3, 1,1)\r\n \r\n # %% Gridspec options\r\n gridSpec_label = QtWidgets.QLabel('Arrangement')\r\n gridSpec_label.setToolTip('Only relevant when exporting series of traces in a grid layout')\r\n gridSpec_comboBox = QtWidgets.QComboBox()\r\n gridSpec_comboList = ['Vertically', 'Horizontally', 'Channels x Episodes', 'Episodes x Channels']\r\n gridSpec_comboBox.addItems(gridSpec_comboList)\r\n gridSpec_comboBox.setCurrentIndex(gridSpec_comboList.index(self.options['gridSpec']))\r\n self.settingDict['gridSpec'] = gridSpec_comboBox\r\n \r\n scalebarAt_label = QtWidgets.QLabel('Scalebar Location')\r\n scalebarAt_label.setToolTip('Only relevant when exporting series of traces in a grid layout')\r\n scalebarAt_comboBox = QtWidgets.QComboBox()\r\n scalebarAt_comboList = ['All', 'First','Last','None']\r\n scalebarAt_comboBox.addItems(scalebarAt_comboList)\r\n scalebarAt_comboBox.setCurrentIndex(scalebarAt_comboList.index(self.options['scalebarAt']))\r\n self.settingDict['scalebarAt'] = scalebarAt_comboBox\r\n \r\n gridSpec_groupBox = QtWidgets.QGroupBox('Grid')\r\n gridSpec_groupBox.setLayout(QtWidgets.QGridLayout())\r\n gridSpec_groupBox.layout().addWidget(gridSpec_label, 0, 0, 1,1)\r\n gridSpec_groupBox.layout().addWidget(gridSpec_comboBox, 0, 1, 1, 1)\r\n gridSpec_groupBox.layout().addWidget(scalebarAt_label, 1, 0, 1,1)\r\n gridSpec_groupBox.layout().addWidget(scalebarAt_comboBox, 1,1, 1, 1)\r\n \r\n # %% output\r\n dpi_label = QtWidgets.QLabel('DPI')\r\n dpi_text = QtWidgets.QLineEdit(str(self.options['dpi']))\r\n self.settingDict['dpi'] = dpi_text\r\n\r\n linewidth_label = QtWidgets.QLabel('Linewidth')\r\n linewidth_text = QtWidgets.QLineEdit(str(self.options['linewidth']))\r\n self.settingDict['linewidth'] = linewidth_text\r\n \r\n fontName_label = QtWidgets.QLabel('Font Name')\r\n fontName_text = QtWidgets.QLineEdit(self.options['fontName'])\r\n self.settingDict['fontName'] = fontName_text\r\n fontSize_label = QtWidgets.QLabel('Label Font Size')\r\n fontSize_text = QtWidgets.QLineEdit(str(self.options['fontSize']))\r\n self.settingDict['fontSize'] = fontSize_text\r\n #annotfontSize_label = QtWidgets.QLabel('Annotation Font Size')\r\n #annotfontSize_text = QtWidgets.QLineEdit(str(self.options['annotfontSize']))\r\n #self.settingDict['annotfontSize'] = annotfontSize_text\r\n \r\n annotation_label = QtWidgets.QLabel('Annotation')\r\n annotation_comboBox = QtWidgets.QComboBox()\r\n ann_comboList = ['Label Only', 'Simple', 'Full', 'None']\r\n annotation_comboBox.addItems(ann_comboList)\r\n annotation_comboBox.setCurrentIndex(ann_comboList.index(self.options['annotation']))\r\n self.settingDict['annotation'] = annotation_comboBox\r\n\r\n monostim_checkbox = QtWidgets.QCheckBox('Force Black Stim')\r\n monostim_checkbox.setToolTip('If checked, stimulus channel will not be color coded even when other channels are color coded')\r\n monostim_checkbox.setCheckState(2 if self.options['monoStim'] else 0)\r\n self.settingDict['monoStim'] = monostim_checkbox\r\n\r\n SRC_checkbox = QtWidgets.QCheckBox('Stim=Current')\r\n SRC_checkbox.setToolTip('If checked, stimulus will be shifted to baseline current level')\r\n SRC_checkbox.setCheckState(2 if self.options['stimReflectCurrent'] else 0)\r\n self.settingDict['stimReflectCurrent'] = SRC_checkbox\r\n\r\n showInitVal = QtWidgets.QCheckBox(\"Show Initial Value\")\r\n showInitVal.setToolTip(\"Display the initial value at the beginning of the trace\")\r\n showInitVal.setCheckState(2 if self.options['showInitVal'] else 0)\r\n self.settingDict['showInitVal'] = showInitVal\r\n\r\n plotStimOnce = QtWidgets.QCheckBox(\"Plot Stim Once\")\r\n plotStimOnce.setToolTip('Check this if stimulus are identical across different episodes')\r\n plotStimOnce.setCheckState(2 if self.options['plotStimOnce'] else 0)\r\n self.settingDict['plotStimOnce'] = plotStimOnce\r\n \r\n saveDir_label = QtWidgets.QLabel('Path')\r\n saveDir_text = QtWidgets.QLineEdit(self.options['saveDir'])\r\n self.settingDict['saveDir'] = saveDir_text\r\n \r\n output_groupBox = QtWidgets.QGroupBox('Output')\r\n output_groupBox.setLayout(QtWidgets.QGridLayout())\r\n\r\n output_groupBox.layout().addWidget(annotation_label, 0, 0, 1, 1)\r\n output_groupBox.layout().addWidget(annotation_comboBox, 0, 1, 1, 1)\r\n #output_groupBox.layout().addWidget(annotfontSize_label, 0, 2, 1, 1)\r\n #output_groupBox.layout().addWidget(annotfontSize_text, 0, 3, 1, 1)\r\n output_groupBox.layout().addWidget(fontName_label, 1, 0, 1, 1)\r\n output_groupBox.layout().addWidget(fontName_text, 1, 1, 1, 1)\r\n output_groupBox.layout().addWidget(fontSize_label, 1, 2, 1, 1)\r\n output_groupBox.layout().addWidget(fontSize_text, 1, 3, 1, 1)\r\n output_groupBox.layout().addWidget(dpi_label, 2, 0, 1, 1)\r\n output_groupBox.layout().addWidget(dpi_text, 2, 1, 1, 1)\r\n output_groupBox.layout().addWidget(linewidth_label, 2, 2, 1, 1)\r\n output_groupBox.layout().addWidget(linewidth_text, 2, 3, 1, 1)\r\n output_groupBox.layout().addWidget(SRC_checkbox, 3, 0, 1, 1)\r\n output_groupBox.layout().addWidget(showInitVal, 3, 1, 1, 1)\r\n output_groupBox.layout().addWidget(monostim_checkbox, 3, 2, 1, 1)\r\n output_groupBox.layout().addWidget(plotStimOnce, 3, 3, 1, 1)\r\n output_groupBox.layout().addWidget(saveDir_label, 4, 0, 1, 1)\r\n output_groupBox.layout().addWidget(saveDir_text, 4, 1, 1, 3)\r\n \r\n # %% Organize widgets\r\n\r\n widgetFrame.layout().addWidget(size_groupBox)\r\n widgetFrame.layout().addWidget(concat_groupBox)\r\n widgetFrame.layout().addWidget(gridSpec_groupBox)\r\n widgetFrame.layout().addWidget(output_groupBox)\r\n widgetFrame.layout().addStretch(10)\r\n \r\n return widgetFrame\r\n\r\n def viewTabUI(self):\r\n widgetFrame = QtWidgets.QFrame()\r\n widgetFrame.setLayout(QtWidgets.QVBoxLayout())\r\n sizePolicy = QtWidgets.QSizePolicy(QtWidgets.QSizePolicy.Preferred, QtWidgets.QSizePolicy.Preferred)\r\n sizePolicy.setHorizontalStretch(0)\r\n widgetFrame.setSizePolicy(sizePolicy)\r\n widgetFrame.setObjectName(_fromUtf8(\"ViewWidgetFrame\"))\r\n\r\n # Default View Range\r\n stream_label = QtWidgets.QLabel(\"Stream\")\r\n min_label = QtWidgets.QLabel(\"Min\")\r\n max_label = QtWidgets.QLabel(\"Max\")\r\n time_label = QtWidgets.QLabel('Time')\r\n time_min_text = QtWidgets.QLineEdit(str(self.options['timeRangeMin']))\r\n time_max_text = QtWidgets.QLineEdit(str(self.options['timeRangeMax']))\r\n volt_label = QtWidgets.QLabel(\"Voltage\")\r\n volt_min_text = QtWidgets.QLineEdit(str(self.options['voltRangeMin']))\r\n volt_max_text = QtWidgets.QLineEdit(str(self.options['voltRangeMax']))\r\n cur_label = QtWidgets.QLabel(\"Current\")\r\n cur_min_text = QtWidgets.QLineEdit(str(self.options['curRangeMin']))\r\n cur_max_text = QtWidgets.QLineEdit(str(self.options['curRangeMax']))\r\n stim_label = QtWidgets.QLabel(\"Stimulus\")\r\n stim_min_text = QtWidgets.QLineEdit(str(self.options['stimRangeMin']))\r\n stim_max_text = QtWidgets.QLineEdit(str(self.options['stimRangeMax']))\r\n\r\n # Put objects into setting dictionary\r\n self.settingDict['timeRangeMin'] = time_min_text\r\n self.settingDict['timeRangeMax'] = time_max_text\r\n self.settingDict['voltRangeMin'] = volt_min_text\r\n self.settingDict['voltRangeMax'] = volt_max_text\r\n self.settingDict['curRangeMin'] = cur_min_text\r\n self.settingDict['curRangeMax'] = cur_max_text\r\n self.settingDict['stimRangeMin'] = stim_min_text\r\n self.settingDict['stimRangeMax'] = stim_max_text\r\n\r\n # Add to the groupbox\r\n view_groupBox = QtWidgets.QGroupBox('Default Range')\r\n view_groupBox.setLayout(QtWidgets.QGridLayout())\r\n view_groupBox.layout().addWidget(stream_label, 0, 0, 1, 1)\r\n view_groupBox.layout().addWidget(min_label, 0, 1, 1, 1)\r\n view_groupBox.layout().addWidget(max_label, 0, 2, 1, 1)\r\n view_groupBox.layout().addWidget(time_label, 1, 0, 1, 1)\r\n view_groupBox.layout().addWidget(time_min_text, 1, 1, 1, 1)\r\n view_groupBox.layout().addWidget(time_max_text, 1, 2, 1, 1)\r\n view_groupBox.layout().addWidget(volt_label, 2, 0, 1, 1)\r\n view_groupBox.layout().addWidget(volt_min_text, 2, 1, 1, 1)\r\n view_groupBox.layout().addWidget(volt_max_text, 2, 2, 1, 1)\r\n view_groupBox.layout().addWidget(cur_label, 3, 0, 1, 1)\r\n view_groupBox.layout().addWidget(cur_min_text, 3, 1, 1, 1)\r\n view_groupBox.layout().addWidget(cur_max_text, 3, 2, 1, 1)\r\n view_groupBox.layout().addWidget(stim_label, 4, 0, 1, 1)\r\n view_groupBox.layout().addWidget(stim_min_text, 4, 1, 1, 1)\r\n view_groupBox.layout().addWidget(stim_max_text, 4, 2, 1, 1)\r\n\r\n # Oragnize the widget\r\n widgetFrame.layout().addWidget(view_groupBox)\r\n widgetFrame.layout().addStretch(10)\r\n\r\n return widgetFrame\r\n\r\n def updateSettings(self, closeWidget=False):\r\n for k, v in self.settingDict.items():\r\n if isinstance(v, QtWidgets.QComboBox):\r\n val = v.currentText()\r\n elif isinstance(v, QtWidgets.QLineEdit):\r\n val = v.text()\r\n elif isinstance(v, QtWidgets.QCheckBox):\r\n val = True if v.checkState()>0 else False\r\n elif isinstance(v, QtWidgets.QSpinBox):\r\n val = v.value()\r\n else:\r\n raise(TypeError('Unrecognized type of setting item'))\r\n \r\n self.options[k] = val\r\n \r\n # save all the parameters\r\n writeini(iniPath=self.iniPath, options=self.options)\r\n \r\n if closeWidget:\r\n self.close()\r\n \r\n def toggleHFixedSpace(self, hSpace_comboBox, hSpace_spinbox, forbidden_text):\r\n if hSpace_comboBox.currentText() == forbidden_text:\r\n hSpace_spinbox.setEnabled(False)\r\n else:\r\n hSpace_spinbox.setEnabled(True)\r\n \r\n def closeEvent(self, event):\r\n \"\"\"Override default behavior when closing the main window\"\"\"\r\n self.isclosed = True\r\n \r\n# </editor-fold>\r\n\r\n\r\nif __name__ == '__main__':\r\n# iniPath = 'D:/Edward/Documents/Assignments/Scripts/Python/PySynapse/resources/config.ini'\r\n# with fileinput.input(iniPath, inplace=True, backup='.bak') as f:\r\n# for line in f:\r\n# if line[0] == '#':\r\n# print('#asdf.mat')\r\n# else:\r\n# print(line, end='')\r\n \r\n app = QtWidgets.QApplication(sys.argv)\r\n ex = Settings()\r\n ex.show()\r\n sys.exit(app.exec_())"
},
{
"alpha_fraction": 0.7346534729003906,
"alphanum_fraction": 0.7485148310661316,
"avg_line_length": 43.90909194946289,
"blob_id": "c2062b8eef01b7f5a8a9229db400c82145b8e31d",
"content_id": "46c55143df521d7df14d56e7c1e77ec434fb465c",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Markdown",
"length_bytes": 505,
"license_type": "no_license",
"max_line_length": 134,
"num_lines": 11,
"path": "/PySynapse/resources/ui_designer/Scope UI design.md",
"repo_name": "sapphire008/Python",
"src_encoding": "UTF-8",
"text": "## Scope UI\r\n1. MainWindo\r\n2. GraphicsView -> resize to the left\r\n3. Dockwidget -> dock to the right for now.\r\n4. ListView -> or any widget, drag on top of dock widget\r\n\r\nNow, grouping things.\r\n\r\n5. Select centralwidget in 'Object Insepctor' on the right. Then, in the toolbar, select 'Layout Horizontally'\r\n6. Select the dockwidget and the listview together by clikcing them while holding down control, then also select 'Layout Horizontally'\r\n7. Resize the mainwindow, adjust the size of the dock. Save\r\n"
},
{
"alpha_fraction": 0.6675154566764832,
"alphanum_fraction": 0.7042561173439026,
"avg_line_length": 34.71428680419922,
"blob_id": "b5d0eccc7397ef71f454ae47664433da26b1f04d",
"content_id": "26ca8c7e41e2ffe5bd4a3fc7ad0959c2befa83e1",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 2749,
"license_type": "no_license",
"max_line_length": 106,
"num_lines": 77,
"path": "/python_tutorials/practice_notes_1.py",
"repo_name": "sapphire008/Python",
"src_encoding": "UTF-8",
"text": "# Python 3.3.0 Practice Notes\n# Day 1: November 23, 2012\n\n# print()\n#default delimiter is \\n, which prints at a new line every line of print()\nprint('Hello, world!',\"I am okay\");#use single or double quotes are both fine\n\n#len()\nlen(\"asdffg\");#returns the length of the string\n\n# Converting between letter and integer (ASCII)\nord('a'); #--> integer\nchr(97); #--> unicode character\n\n# Concatenation\nfirst = 'throat';\nsecond = ' warbler';\nprint(first + second);\n#another example of concatenation\nthis_word = 'Spam';\nprint(this_word*3);# 'this_word', then, will be repeated 3 times in one string\n\n# Difference in Division between Python2 and Python3\n#In Python 2, / is the floor division,\n#whereas in Python 3, // is the floor division. This means, even if one of the number is float\n#if we call // in division operation, it is going to perform a floor division first,\n#Then convert the result to a float.\n#In Python 2, to use float division, we must convert one of the number into floats\n#whereas in Python 3, / is the float division\n\n# Checking the type of a variable / object\ntype(32); #--><type 'int'>\ntype ('32'); #--><type 'str'>\n\n# Type Conversion\nint('32'); #--> 32 from type str to type int\nint(3.99999); #--> 3\nint(-2.3333); #--> 2\nfloat(2); #-->2.0, from type int to type float\nfloat('23.424'); # 23.424, from type str to type float\nstr(32.32); #-->'32.32', from type float to type str\n\n# Math Modules and associated funtions\nimport math;#import math modules\nprint(math.pi);#returns constant pi\nprint(math.e);#returns natural number e\nprint(math.log(3,4));#returns log base 4 of 3\nprint(math.log10(20.3));#returns log base 10 of 20.3\nprint(math.log2(23));#returns log base 2 of 23, more accurate than using log(x,base)\nprint(math.exp(3));#returns e to the 3rd power\nprint(math.pow(2,3));#returns 2 raised to the 3rd power\nprint(math.sqrt(3));#returns square root of 3\n#other functions\n#math.sin, math.cos, math.tan,\n#math.atan2 (returns value in radians between -pi and pi)\n#math.degrees(x), math.radians(x)\n#For complex number, \"import cmath\" instead of \"import math\"\n#use cmath as the name of the module to call out these functions\n#We may also do\nfrom math import * #import all functions from the math module\npi #we now can use the functions from math directly, without typing math. every time\n\n# Functions\nmath_eq1=1+1;\nmath_eq2=2+1;\nmath_eq3=math.pi;\ndef let_it_all_out(a,b,c): #don't forget the colon after the parenthesis (which is for argument inputs)!\n print(\"Okay, let's do some math\");\n print(a);\n print(b);\n print(c);\n print(\"Good Job!\");\n #an empty line to signal the end of the function\n#now, call the function\nlet_it_all_out(math_eq1,math_eq2,math_eq3);\n\n#This concludes today's study."
},
{
"alpha_fraction": 0.5522723197937012,
"alphanum_fraction": 0.5731646418571472,
"avg_line_length": 36.383384704589844,
"blob_id": "b10851470f53de83b54911e4d6738ba209db405e",
"content_id": "2aa69e7afb77d490df99412c9c35c1c8a1e15c1e",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 12014,
"license_type": "no_license",
"max_line_length": 116,
"num_lines": 313,
"path": "/generic/R.py",
"repo_name": "sapphire008/Python",
"src_encoding": "UTF-8",
"text": "# -*- coding: utf-8 -*-\r\n\"\"\"\r\nCreated on Sat Oct 29 19:20:50 2016\r\n\r\nConvenient R functions in Python\r\n\r\n@author: Edward\r\n\"\"\"\r\n\r\nimport numpy as np\r\nimport pandas as pd\r\nfrom pdb import set_trace\r\nimport gc\r\nimport psutil\r\nfrom joblib import Parallel, delayed\r\nimport multiprocessing\r\nfrom tqdm import tqdm\r\n\r\ndef aggregate(df, by, fun, select=None, subset=None, **kwargs):\r\n \"\"\"This should enhance the stupidly designed groupby functions.\r\n Will have to evoke this until it is fixed.\r\n\r\n df: pandas data frame\r\n by: columns to aggregate by\r\n fun: function(s) to apply. For examples:\r\n - fun = {'mean': np.mean, 'variance', np.var} will create 2 columns\r\n in the aggregated dataframe, 'mean' and 'variance', which stores\r\n the results of each aggregation\r\n - fun = ['sum', 'count', custom_function], apply the pandas built-in\r\n sum and count, as well as a custom_function defined by the user.\r\n The column aggregated by custom_function will be named\r\n 'custom_function'\r\n select: select columns to aggregate on, exclude other columns\r\n subset: select rows to aggregate on.\r\n \"\"\"\r\n if 'DataFrameGroupBy' not in str(type(df)):\r\n for b in by:\r\n df[b] = df[b].astype('category')\r\n\r\n df = subset(df, select, subset)\r\n gp = df.groupby(by)\r\n\r\n gp = gp.agg(fun)#, **kwargs)\r\n # Remove any nuisance rows with all nans\r\n gp = subset(gp, subset=~np.all(np.isnan(gp.values), axis=1))\r\n\r\n return gp\r\n\r\ndef subset(df, select=None, subset=None):\r\n \"\"\"\r\n select: columns to keep\r\n subset: rows to keep\r\n \"\"\"\r\n if select is not None:\r\n df = df[select]\r\n\r\n if select is not None:\r\n df = df.loc[subset, :]\r\n\r\n return df\r\n\r\ndef filterByCount(df, N=2, by=None, by2=None, keep_count=False):\r\n \"\"\"\r\n Remove the rows if the total number of rows associated with the aggregate\r\n is less than a threshold N\r\n by: column to aggregate by\r\n by2: aggreagte on top of the aggregation (summary of the summary)\r\n keep_count: keep the count column\r\n \"\"\"\r\n if not isinstance(by, list):\r\n raise(TypeError('parameter \"by\" must be a list'))\r\n df['filtered_count'] = 0\r\n gp = df.groupby(by=by, as_index=False, sort=False)\r\n ns0 = gp.count()\r\n ns0 = ns0[by+['filtered_count']]\r\n df.drop(columns=['filtered_count'], inplace=True)\r\n if by2 is not None:\r\n gp = ns0.groupby(by=by2, as_index=False, sort=False)\r\n ns1 = gp.count()\r\n ns1.drop(columns=np.setdiff1d(by, by2), inplace=True)\r\n df = df.merge(ns1, on=by2)\r\n else:\r\n df = df.merge(ns0, on=by)\r\n df = df.loc[df['filtered_count']>=N,:]\r\n if not keep_count:\r\n df.drop(columns=['filtered_count'], inplace=True)\r\n return df\r\n\r\ndef filterAND(df, AND, select=None):\r\n \"\"\"\r\n Filter rows of a dataframe based on the values in the specified columns\r\n\r\n AND: A dictionary, where the keys are the column names, and values are the\r\n the corresponding values of the column to apply AND filter on\r\n select: subset of columns to use\r\n \"\"\"\r\n AND_filter = []\r\n for key, val in AND.items():\r\n if isinstance(val, (list, tuple, np.ndarray)):\r\n for v in val:\r\n AND_filter.append((df[key] == v).values)\r\n else:\r\n AND_filter.append((df[key] == val).values)\r\n \r\n if select is None:\r\n if len(AND_filter) > 1:\r\n return df.loc[np.logical_and.reduce(AND_filter), :]\r\n else:\r\n return df.loc[AND_filter[0], :]\r\n else:\r\n if len(AND_filter) > 1:\r\n return df.loc[np.logical_and.reduce(AND_filter), select]\r\n else:\r\n return df.loc[AND_filter[0], select]\r\n\r\n\r\ndef filterOR(df, OR, select=None):\r\n \"\"\"\r\n Filter rows of a dataframe based on the values in the specified columns\r\n\r\n OR: A dictionary, where the keys are the column names, and values are the\r\n the corresponding values of the column to apply OR filter on\r\n select: subset of columns to use\r\n \"\"\"\r\n OR_filter = []\r\n try:\r\n for key, val in OR.items():\r\n if isinstance(val, (list, tuple, np.ndarray)):\r\n for v in val:\r\n OR_filter.append((df[key] == v).values)\r\n else:\r\n OR_filter.append((df[key] == val).values)\r\n if select is None:\r\n if len(OR_filter) > 1:\r\n return df.loc[np.logical_or.reduce(OR_filter), :]\r\n else:\r\n return df.loc[OR_filter[0], :]\r\n else:\r\n if len(OR_filter) > 1:\r\n return df.loc[np.logical_or.reduce(OR_filter), select]\r\n else:\r\n return df.loc[OR_filter[0], select]\r\n except:\r\n set_trace()\r\n\r\n\r\ndef filterRows(df, AND=None, OR=None, AndBeforeOr=True, select=None):\r\n \"\"\"\r\n Filter rows of a dataframe based on the values in the specified columns\r\n\r\n AND: A dictionary, where the keys are the column names, and values are the\r\n the corresponding values of the column to apply AND filter on\r\n OR: A dictionary like And, except to apply OR filter\r\n AndBeforeOr: If true (Default), apply AND filter before OR filter\r\n select: subset of columns to use\r\n \"\"\"\r\n # Make a copy of the df first\r\n dff = df.copy()\r\n if AndBeforeOr:\r\n # Apply AND filter\r\n dff = filterAND(dff, AND, select=select)\r\n # Apply OR filter\r\n dff = filterOR(dff, OR, select=select)\r\n else:\r\n # Apply AND filter\r\n dff = filterOR(dff, OR, select=select)\r\n # Apply OR filter\r\n dff = filterAND(dff, AND, select=select)\r\n \r\n return dff\r\n\r\n\r\ndef merge_size(left_frame, right_frame, on, how='inner'):\r\n \"\"\"\r\n Check memory usage of a merge\r\n \"\"\"\r\n left_groups = left_frame.groupby(on).size()\r\n right_groups = right_frame.groupby(on).size()\r\n left_keys = set(left_groups.index)\r\n right_keys = set(right_groups.index)\r\n intersection = right_keys & left_keys\r\n left_diff = left_keys - intersection\r\n right_diff = right_keys - intersection\r\n\r\n left_nan = len(left_frame[left_frame[on] != left_frame[on]])\r\n right_nan = len(right_frame[right_frame[on] != right_frame[on]])\r\n left_nan = 1 if left_nan == 0 and right_nan != 0 else left_nan\r\n right_nan = 1 if right_nan == 0 and left_nan != 0 else right_nan\r\n\r\n sizes = [(left_groups[group_name] * right_groups[group_name]) for group_name in intersection]\r\n sizes += [left_nan * right_nan]\r\n\r\n left_size = [left_groups[group_name] for group_name in left_diff]\r\n right_size = [right_groups[group_name] for group_name in right_diff]\r\n if how == 'inner':\r\n return sum(sizes)\r\n elif how == 'left':\r\n return sum(sizes + left_size)\r\n elif how == 'right':\r\n return sum(sizes + right_size)\r\n return sum(sizes + left_size + right_size)\r\n\r\ndef mem_fit(df1, df2, on, how='inner'):\r\n \"\"\"\r\n Check if a merge would fit in the memory\r\n \"\"\"\r\n rows = merge_size(df1, df2, on, how)\r\n cols = len(df1.columns) + (len(df2.columns) - 1)\r\n required_memory = (rows * cols) * np.dtype(np.float64).itemsize\r\n\r\n return required_memory <= psutil.virtual_memory().available\r\n\r\n\r\ndef applyParallel(dfGrouped, func, n_jobs=None):\r\n \"\"\"\r\n def apply_func(pandas_df):\r\n ...\r\n \r\n df = applyParallel(df.groupby(by=grouped_by_columns, as_index=False), apply_func)\r\n \"\"\"\r\n if n_jobs is None:\r\n n_jobs = multiprocessing.cpu_count()\r\n retLst = Parallel(n_jobs=n_jobs)(\\\r\n delayed(func)(group) for name, group in tqdm(dfGrouped)) # enumerate(tqdm(dfGrouped))\r\n return pd.concat(retLst)\r\n\r\n\r\ndef reduce_mem_usage(df, int_cast=True, obj_to_category=False, subset=None):\r\n \"\"\"\r\n Iterate through all the columns of a dataframe and modify the data type to reduce memory usage.\r\n :param df: dataframe to reduce (pd.DataFrame)\r\n :param int_cast: indicate if columns should be tried to be casted to int (bool)\r\n :param obj_to_category: convert non-datetime related objects to category dtype (bool)\r\n :param subset: subset of columns to analyse (list)\r\n :return: dataset with the column dtypes adjusted (pd.DataFrame)\r\n \"\"\"\r\n start_mem = df.memory_usage().sum() / 1024 ** 2;\r\n gc.collect()\r\n print('Memory usage of dataframe is {:.2f} MB'.format(start_mem))\r\n\r\n cols = subset if subset is not None else df.columns.tolist()\r\n\r\n for col in cols:\r\n col_type = df[col].dtype\r\n\r\n if col_type != object and col_type.name != 'category' and 'datetime' not in col_type.name:\r\n c_min = np.nanmin(df[col].values)\r\n c_max = np.nanmax(df[col].values)\r\n\r\n # test if column can be cast down to a smaller integer\r\n treat_as_int = str(col_type)[:3] == 'int'\r\n\r\n if treat_as_int:\r\n if c_min > np.iinfo(np.int8).min and c_max < np.iinfo(np.int8).max:\r\n df[col] = df[col].astype(np.int8)\r\n elif c_min > np.iinfo(np.uint8).min and c_max < np.iinfo(np.uint8).max:\r\n df[col] = df[col].astype(np.uint8)\r\n elif c_min > np.iinfo(np.int16).min and c_max < np.iinfo(np.int16).max:\r\n df[col] = df[col].astype(np.int16)\r\n elif c_min > np.iinfo(np.uint16).min and c_max < np.iinfo(np.uint16).max:\r\n df[col] = df[col].astype(np.uint16)\r\n elif c_min > np.iinfo(np.int32).min and c_max < np.iinfo(np.int32).max:\r\n df[col] = df[col].astype(np.int32)\r\n elif c_min > np.iinfo(np.uint32).min and c_max < np.iinfo(np.uint32).max:\r\n df[col] = df[col].astype(np.uint32)\r\n elif c_min > np.iinfo(np.int64).min and c_max < np.iinfo(np.int64).max:\r\n df[col] = df[col].astype(np.int64)\r\n elif c_min > np.iinfo(np.uint64).min and c_max < np.iinfo(np.uint64).max:\r\n df[col] = df[col].astype(np.uint64)\r\n else: # floats\r\n if c_min > np.finfo(np.float16).min and c_max < np.finfo(np.float16).max:\r\n df[col] = df[col].astype(np.float16)\r\n elif c_min > np.finfo(np.float32).min and c_max < np.finfo(np.float32).max:\r\n df[col] = df[col].astype(np.float32)\r\n else:\r\n df[col] = df[col].astype(np.float64)\r\n elif 'datetime' not in col_type.name and obj_to_category:\r\n df[col] = df[col].astype('category')\r\n gc.collect()\r\n end_mem = df.memory_usage().sum() / 1024 ** 2\r\n print('Memory usage after optimization is: {:.3f} MB'.format(end_mem))\r\n print('Decreased by {:.1f}%'.format(100 * (start_mem - end_mem) / start_mem))\r\n\r\n return df\r\n\r\ndef columns2dict(df, key_cols, val_cols):\r\n \"\"\"Convert a dataframe's columns into key and val pairs\r\n Parameters:\r\n -----------\r\n * df: Pandas dataframe\r\n * key_cols: list of columns used as key\r\n * val_cols: list of columns used as value\r\n\r\n Example 1:\r\n df = pd.DataFrame({\"key1\":[1, 2, 3, 4], \"val1\":[10, 25, 35, 45]})\r\n out_dict = column2dict(df, \"key1\", \"val1\")\r\n >> {1: 10, 2: 25, 3: 35, 4: 45}\r\n \r\n Example 2:\r\n df = pd.DataFrame({\"key1\":[1, 2, 3, 4], \"key2\":[\"a\",\"b\",\"c\",\"d\"], \"val1\":[10, 25, 35, 45]})\r\n out_dict = columns2dict(df, [\"key1\", \"key2\"], [\"val1\"])\r\n >> {(1, 'a'): 10, (2, 'b'): 25, (3, 'c'): 35, (4, 'd'): 45}\r\n \r\n Example 3:\r\n df = pd.DataFrame({\"key1\":[1, 2, 3, 4], \"key2\":[\"a\",\"b\",\"c\",\"d\"], \"val1\":[10, 25, 35, 45], \"val2\":[102, 204, 308, 496]})\r\n out_dict = columns2dict(df, [\"key1\", \"key2\"], [\"val1\", \"val2\"])\r\n >> {(1, 'a'): [10, 102], (2, 'b'): [25, 204], (3, 'c'): [35, 308], (4, 'd'): [45, 496]}\r\n \"\"\"\r\n tmp = df[key_cols+val_cols].set_index(key_cols).T\r\n if len(val_cols) < 2:\r\n return tmp.to_dict('records')[0]\r\n else:\r\n return tmp.to_dict('list')\r\n"
},
{
"alpha_fraction": 0.6399999856948853,
"alphanum_fraction": 0.653333306312561,
"avg_line_length": 16.75,
"blob_id": "e0be007a1cb519a079b27113cf88a3e09329ea13",
"content_id": "0bcaa8a8753cd44afd4ec8ad8a69c3db8a1376fe",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Markdown",
"length_bytes": 75,
"license_type": "no_license",
"max_line_length": 46,
"num_lines": 4,
"path": "/Plots/README.md",
"repo_name": "sapphire008/Python",
"src_encoding": "UTF-8",
"text": "# README #\r\n\r\n## To Do ##\r\n1. Gridspec arrangement of subplots for traces\r\n"
},
{
"alpha_fraction": 0.484375,
"alphanum_fraction": 0.5021159052848816,
"avg_line_length": 31.85026741027832,
"blob_id": "110f7f2b1698915782d4d5696c7b6dad1050a86f",
"content_id": "bb5fa7b7671faaa1ca4b9f74c3d53dede23ab615",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Markdown",
"length_bytes": 6144,
"license_type": "no_license",
"max_line_length": 206,
"num_lines": 187,
"path": "/Spikes/spikedetekt2/dev/file_format/kwikformat_hdf5.md",
"repo_name": "sapphire008/Python",
"src_encoding": "UTF-8",
"text": "# File format specification\n\n## Data files\n\nAll files are in HDF5.\n\n * The data are stored in the following files:\n \n * the **KWIK** file is the main file, it contains:\n * all metadata\n * spike times\n * clusters\n * recording for each spike time\n * probe-related information\n * information about channels\n * information about cluster groups\n * events, event_types\n * aesthetic information, user data, application data\n * the **KWX** file contains the **spiking data**: features, masks, waveforms\n * the **KWD** files contain the **raw/filtered recordings**\n \n * Once spike sorting is finished, one can discard the KWX and KWD files and just keep the KWIK file for subsequent analysis (where spike sorting information like features, waveforms... are not necessary).\n\n * All files contain a **version number** in `/` (`kwik_version` attribute), which is an integer equal to 2 now.\n\n * The input files the user provides to the programs to generate these data are:\n \n * the **raw data** coming out from the acquisition system, in any proprietary format (NS5, etc.)\n * processing parameters (PRM file) and description of the probe (PRB file)\n \n\n### KWIK\n\nBelow is the structure of the KWIK file.Everything is a group, except fields with a star (*) which are either leaves (datasets: arrays or tables) or attributes of their parents.\n\n[X] is 0, 1, 2...\n \n /kwik_version* [=2]\n /name*\n /application_data\n spikedetekt\n MY_SPIKEDETEKT_PARAM*\n ...\n /user_data\n /channel_groups\n [X]\n name*\n adjacency_graph* [Kx2 array of integers]\n application_data\n user_data\n channels\n [X]\n name*\n kwd_index*\n ignored*\n position* (in microns relative to the whole multishank probe)\n voltage_gain* (in microvolts)\n display_threshold*\n application_data\n klustaviewa\n spikedetekt\n user_data\n spikes\n time_samples* [N-long EArray of UInt64]\n time_fractional* [N-long EArray of UInt8]\n recording* [N-long EArray of UInt16]\n cluster* [N-long EArray of UInt32]\n cluster_original* [N-long EArray of UInt32]\n features_masks\n hdf5_path* [='{kwx}/channel_groups/X/features_masks']\n waveforms_raw\n hdf5_path* [='{kwx}/channel_groups/X/waveforms_raw']\n waveforms_filtered\n hdf5_path* [='{kwx}/channel_groups/X/waveforms_filtered']\n clusters\n [X]\n application_data\n klustaviewa\n color*\n cluster_group*\n mean_waveform_raw*\n mean_waveform_filtered*\n quality_measures\n isolation_distance*\n matrix_isolation*\n refractory_violation*\n amplitude*\n user_data\n ...\n cluster_groups\n [X]\n name*\n application_data\n klustaviewa\n color*\n user_data\n /recordings\n [X]\n name*\n start_time*\n start_sample*\n sample_rate*\n bit_depth*\n band_high*\n band_low*\n raw\n hdf5_path* [='{raw.kwd}/recordings/X']\n high\n hdf5_path* [='{high.kwd}/recordings/X']\n low\n hdf5_path* [='{low.kwd}/recordings/X']\n user_data\n /event_types\n [X]\n user_data\n application_data\n klustaviewa\n color*\n events\n time_samples* [N-long EArray of UInt64]\n recording* [N-long EArray of UInt16]\n user_data [group or EArray]\n\n### KWX\n\nThe **KWX** file contains spike-sorting-related information.\n\n /channel_groups\n [X]\n features_masks* [(N x NFEATURES x 2) EArray of Float32]\n waveforms_raw* [(N x NWAVESAMPLES x NCHANNELS) EArray of Int16]\n waveforms_filtered* [(N x NWAVESAMPLES x NCHANNELS) EArray of Int16]\n\n### KWD\n\nThe **KWD** files contain the original recordings (raw and filtered). Each file among the `.raw`, `.high` and `.low` contains:\n\n /recordings\n [X]\n data* [(NSAMPLES x NCHANNELS) EArray of Int16]\n filter\n name*\n param1*\n downsample_factor*\n\n\n## User files\n\n### PRB\n\nThis JSON text file describes the probe used for the experiment: its geometry, its topology, and the dead channels.\n\n {\n \"channel_groups\": \n [\n {\n \"channels\": [0, 1, 2, 3],\n \"graph\": [[0, 1], [2, 3], ...],\n \"geometry\": {\"0\": [0.1, 0.2], \"1\": [0.3, 0.4], ...}\n },\n {\n \"channels\": [4, 5, 6, 7],\n \"graph\": [[4, 5], [6, 7], ...],\n \"geometry\": {\"4\": [0.1, 0.2], \"5\": [0.3, 0.4], ...}\n }\n ]\n }\n\n\n### PRM\n\nThis Python script defines all parameters necessary for the programs to process, open and display the data.\n\n EXPERIMENT_NAME = 'myexperiment'\n RAW_DATA_FILES = ['n6mab041109blahblah1.ns5', 'n6mab041109blahblah2.ns5']\n PRB_FILE = 'buzsaki32.probe'\n NCHANNELS = 32\n SAMPLING_FREQUENCY = 20000.\n IGNORED_CHANNELS = [2, 5]\n NBITS = 16\n VOLTAGE_GAIN = 10.\n WAVEFORMS_NSAMPLES = 20 # or a dictionary {channel_group: nsamples}\n FETDIM = 3 # or a dictionary {channel_group: fetdim}\n # ...\n \n # SpikeDetekt parameters file\n # ...\n\n"
},
{
"alpha_fraction": 0.5473684072494507,
"alphanum_fraction": 0.5666666626930237,
"avg_line_length": 42.846153259277344,
"blob_id": "e0c337f52bbf6dc312a69c1a46ecc5a37dfe26d0",
"content_id": "00db0bc46d20a3d9ddf0c560a4cebe5f396688b0",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 4560,
"license_type": "no_license",
"max_line_length": 107,
"num_lines": 104,
"path": "/ReadNWrite/create_xls_charts_example.py",
"repo_name": "sapphire008/Python",
"src_encoding": "UTF-8",
"text": "\"\"\" \nUse XlsxWriter to create charts and plot in xlsx files\n\"\"\"\nimport sys\nsys.path.append('/hsgs/projects/jhyoon1/pkg64/pythonpackages/')\nsys.path.append('/hsgs/projects/jhyoon1/pkg64/pythonpackages/XlsxWriter-0.5.1')\n# Import xlswriter library\nimport xlrd\nimport xlsxwriter\n# Open the Excel workbook to import data\nworkbook = xlrd.open_workbook('/hsgs/projects/jhyoon1/midbrain_pilots/haldol/MID_ROI_betas.xlsx')\n# Open another Excel workbook to write the data and create charts\nchart_workbook = xlsxwriter.Workbook('/hsgs/projects/jhyoon1/midbrain_pilots/haldol/MID_ROI_betas_2.xlsx');\n# Get a list of worksheets present in the workbook\nworksheet_list = workbook.sheet_names();\n# Convert all contents to strings\nworksheet_list = [str(i) for i in worksheet_list];\nfor w in range(0,len(worksheet_list)):\n # Get the worksheet\n worksheet = None;# clear the variable beforehand\n worksheet = workbook.sheet_by_name(worksheet_list[w]);\n # Grab the cell contents to be plotted\n # Column headers\n col_headers = worksheet.row_values(29);\n col_headers = col_headers[2:];\n col_headers = [str(i) for i in col_headers];\n \n # Original subject by subject data\n Original_Data = None;\n Original_Data = [[]]*worksheet.nrows;\n for r in range (0, worksheet.nrows):\n tmp = None;\n tmp = worksheet.row_values(r);\n # convert all unicode to string and keep all float\n Original_Data[r] = [str(i) if isinstance(i,basestring) else i for i in tmp]\n \n\n # With data imported and saved, create charts\n chart_worksheet = None;\n chart_worksheet = chart_workbook.add_worksheet(worksheet_list[w])\n # Write the imported data to the new worksheet\n for r in range(0,worksheet.nrows):\n chart_worksheet.write_row('A'+str(r+1),Original_Data[r]);\n \n # Create a new Chart object\n chart = None;\n chart = chart_workbook.add_chart({'type':'column'});\n # Configure the first chart\n chart.add_series({'values':worksheet_list[w]+'!$C$31:$H$31',\n 'categories':worksheet_list[w]+'!$C$30:$H$30',\n 'name':'C',\n 'y_error_bars':{\n 'type':'fixed',\n 'value':worksheet_list[w]+'!$C$41:$H$41',\n 'end_style':1,\n 'direction':'both'}});#Cue_C\n chart.add_series({'values':worksheet_list[w]+'!$C$32:$H$32',\n 'categories':worksheet_list[w]+'!$C$30:$H$30',\n 'name':'SZ',\n 'y_error_bars':{\n 'type':'fixed',\n 'value':worksheet_list[w]+'$!C$42:$H$42',\n 'end_style':1,\n 'direction':'both'}});#Cue_C});#Cue_SZ\n chart.set_title({'name':worksheet_list[w]+' Cue Betas by Groups by Conditions'});\n chart.set_legend({'position':'right'});\n chart.set_size({'width':720,'height':576});\n chart.set_x_axis({'name':'Conditions'});\n chart.set_y_axis({'name':'Beta Values',\n 'major_gridlines':{'visible':False}});\n # Insert the chart\n chart_worksheet.insert_chart('B48',chart);\n\n # Configure the second chart\n chart = None;\n chart = chart_workbook.add_chart({'type':'column'});\n chart.add_series({'values':worksheet_list[w]+'!$I$31:$T$31',\n 'categories':worksheet_list[w]+'!$I$30:$T$30',\n 'name':'C',\n 'y_error_bars':{\n 'type':'fixed',\n 'value':worksheet_list[w]+'$!I$41:$T$41',\n 'end_style':1,\n 'direction':'both'}});#Feedback_C\n chart.add_series({'values':worksheet_list[w]+'!$I$32:$T$32',\n 'categories':worksheet_list[w]+'!$I$30:$T$30',\n 'name':'SZ',\n 'y_error_bars':{\n 'type':'fixed',\n 'value':worksheet_list[w]+'$!I$42:$T$42',\n 'end_style':1,\n 'direction':'both'}});#Feedback_SZ\n # Insert the second chart\n chart.set_title({'name':worksheet_list[w]+' Feedback Betas by Groups by Conditions'});\n chart.set_legend({'position':'right'});\n chart.set_size({'width':1500,'height':576});\n chart.set_x_axis({'name':'Conditions'});\n chart.set_y_axis({'name':'Beta Values',\n 'major_gridlines':{'visible':False}});\n # Insert the chart\n chart_worksheet.insert_chart('B80',chart);\n\n# At the end, close the workbook\nchart_workbook.close();\n"
},
{
"alpha_fraction": 0.45898160338401794,
"alphanum_fraction": 0.48656293749809265,
"avg_line_length": 35.842105865478516,
"blob_id": "eb232203b5b3656d65d7b743734ad208f9cb9b82",
"content_id": "b7c5906a47d6d5e4b573f6b4122399577c5bf9af",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 1414,
"license_type": "no_license",
"max_line_length": 79,
"num_lines": 38,
"path": "/Spikes/spikedetekt2/spikedetekt2/processing/tests/test_filtering.py",
"repo_name": "sapphire008/Python",
"src_encoding": "UTF-8",
"text": "\"\"\"Filtering tests.\"\"\"\n\n# -----------------------------------------------------------------------------\n# Imports\n# -----------------------------------------------------------------------------\nimport numpy as np\nfrom scipy import signal\nfrom spikedetekt2.processing import (apply_filter, bandpass_filter,\n get_whitening_matrix, whiten, decimate)\n\n\n# -----------------------------------------------------------------------------\n# Tests\n# -----------------------------------------------------------------------------\ndef test_apply_filter():\n \"\"\"Test bandpass filtering on a combination of two sinusoids.\"\"\"\n rate = 10000.\n low, high = 100., 200.\n # Create a signal with small and high frequencies.\n t = np.linspace(0., 1., rate)\n x = np.sin(2*np.pi*low/2*t) + np.cos(2*np.pi*high*2*t)\n # Filter the signal.\n filter = bandpass_filter(filter_low=low,\n filter_high=high, filter_butter_order=4, sample_rate=rate)\n x_filtered = apply_filter(x, filter=filter)\n # Check that the bandpass-filtered signal is weak.\n assert np.abs(x[int(2./low*rate):-int(2./low*rate)]).max() >= .9\n assert np.abs(x_filtered[int(2./low*rate):-int(2./low*rate)]).max() <= .1\n \ndef test_decimate():\n x = np.random.randn(16000, 3)\n y = decimate(x)\n \ndef test_whitening():\n x = np.random.randn(10000, 2)\n x[:, 1] += x[:,0]\n M = get_whitening_matrix(x)\n # TODO\n \n \n "
},
{
"alpha_fraction": 0.5506204962730408,
"alphanum_fraction": 0.5713878870010376,
"avg_line_length": 36.09408950805664,
"blob_id": "a35195cc77c012bfe6f7c37f186bb9895e789d53",
"content_id": "6e2ae040b11569e9ac73d566be5648df0e304fd7",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 31588,
"license_type": "no_license",
"max_line_length": 143,
"num_lines": 829,
"path": "/Spikes/spk_util.py",
"repo_name": "sapphire008/Python",
"src_encoding": "UTF-8",
"text": "# -*- coding: utf-8 -*-\r\n\"\"\"\r\nCreated on Thu Nov 19 17:19:49 2015\r\n\r\nSome routines for electrophysiology data analyses\r\n\r\n@author: Edward\r\n\"\"\"\r\nimport sys\r\nimport os\r\nimport numpy as np\r\nimport scipy.signal as sg\r\nimport scipy.stats as st\r\nfrom scipy.interpolate import interp1d\r\nfrom scipy.optimize import curve_fit\r\n\r\n# from pdb import set_trace\r\ntry:\r\n from MATLAB import *\r\nexcept:\r\n try:\r\n from util.MATLAB import *\r\n except:\r\n sys.path.append('D:/Edward/Documents/Assignments/Scripts/Python/Spikes/')\r\n from MATLAB import *\r\n\r\n\r\ndef time2ind(t, ts, t0=0):\r\n \"\"\"Convert a time point to index of vector\r\n ind = time2ind(t, ts, t0)\r\n Inputs:\r\n t: current time in ms\r\n ts: sampling rate in ms\r\n t0: (optional) time in ms the first index corresponds\r\n to. Defualt is 0.\r\n\r\n Note that as long as t, ts, and t0 has the same unit of time,\r\n be that second or millisecond, the program will work.\r\n\r\n Output:\r\n ind: index\r\n \"\"\"\r\n if np.isscalar(t):\r\n t = [t]\r\n\r\n return(np.array([int(a) for a in (np.array(t) - t0) / ts]))\r\n\r\ndef ind2time(ind, ts, t0=0):\r\n \"\"\"Convert an index of vector to temporal time point\r\n t = ind2time(ind, ts, t0=0)\r\n Inputs:\r\n ind: current index of the vector\r\n ts: sampling rate in ms\r\n t0: (optional) what time in ms does the first index\r\n correspond to? Defualt is 0\r\n\r\n Note that as long as t, ts, and t0 has the same unit of time,\r\n be that second or millisecond, the program will work.\r\n\r\n Output:\r\n t: current time in ms\r\n \"\"\"\r\n if np.isscalar(ind):\r\n ind = [ind]\r\n return(np.array(ind) * ts + t0)\r\n\r\ndef spk_window(Vs, ts, Window, t0=0):\r\n \"\"\"Window the time series\r\n Vs = spk_window(Vs, ts, Window, t0=0)\r\n Inputs:\r\n Vs: voltage time series\r\n ts: sampling rate [ms]\r\n Window: temporal window, in the format of [min_sec, max_sec]\r\n t0: (optional) what time in seconds does the first index\r\n correspond to? Defualt is 0.\r\n\r\n Note that as long as ts, Window, and t0 has the same unit,\r\n be that second or millisecond, the program will work.\r\n\r\n Output:\r\n Vs: windows Vs\r\n \"\"\"\r\n dur = len(Vs)\r\n # Start / end indices\r\n def parse_window(x, none_allowed, min_allowed, max_allowed, func):\r\n if x is None or np.isnan(x):\r\n x = none_allowed\r\n else:\r\n x = func(x) # apply the transformation\r\n if x < min_allowed:\r\n x = min_allowed\r\n elif x > max_allowed:\r\n x = max_allowed\r\n\r\n return x\r\n\r\n func = lambda y: time2ind(y, ts, t0)[0]\r\n start_ind = parse_window(Window[0], 0, 0, dur, func=func)\r\n end_ind = parse_window(Window[1], dur, 0, dur, func=func)\r\n\r\n return Vs[start_ind:end_ind]\r\n\r\n\r\ndef spk_average(Vs, ts=None, Window=None, axis=0, t0=0):\r\n \"\"\"Find the average of a series of traces\r\n Vs = spk_averagetrace(Vs, ts, Window, axis=0, t0=0)\r\n Inputs:\r\n Vs: time series\r\n ts: sampling rate [ms]. Necessary when specified Window.\r\n Window: temporal window, in the format of [min_ms, max_ms]\r\n dim: dimension to averge the trace.\r\n * None: average everything.\r\n * 0 over time. (Default)\r\n * 1 over trials.\r\n t0: (optional) what time in seconds does the first index\r\n correspond to? Defualt is 0.\r\n \"\"\"\r\n # Window the time series\r\n if Window is not None:\r\n if ts is None:\r\n raise(ValueError('Please specify sampling rate ts.'))\r\n Vs = spk_window(Vs, ts, Window, t0);\r\n\r\n # Take the average\r\n if axis is not None:\r\n Vs = np.mean(Vs, axis=axis)\r\n else:\r\n Vs = np.mean(Vs) # average everything\r\n\r\n return(Vs)\r\n\r\n\r\ndef spk_count(Vs, ts, window=None, msh=-10.0, msd=1.0, **kwargs):\r\n \"\"\" Count the number of action potentials given a time series, using simple\r\n threshold based peak detection algorithm\r\n num_spikes, spike_time, spike_heights = spk_count(Vs, ts, **kwargs)\r\n Inputs:\r\n * Vs: voltage time series [mV].\r\n * ts: sampling rate [ms]\r\n * msh: minimum height of the spike. Default -10.0 [mV].\r\n * msd: minimum distances between detected spikes. Default 1.0 [ms].\r\n * window: window the Vs trace first before counting the spike. \r\n Default None.\r\n **kwargs: optional inputs for \"findpeaks\"\r\n\r\n Note that min_spike_height needs to be in the same unit as Vs;\r\n min_spike_dist needs to be in the same unit as ts.\r\n\r\n Outputs:\r\n * num_spikes: number of spikes for each trial\r\n * spike_time: indices of the spike, returned as one cell array of time\r\n vectors per trial\r\n * spike_heights: voltage of the spike [mV], returned as one cell array\r\n of spike heights per trial\r\n \"\"\"\r\n if window is not None:\r\n Vs = spk_window(Vs, ts, window)\r\n # find spikes\r\n if msd is not None:\r\n msd = float(msd) / float(ts)\r\n ind, spike_heights = findpeaks(Vs, mph=msh, mpd=msd, **kwargs)\r\n # Count number of spikes\r\n num_spikes = len(ind)\r\n # Convert spike timing\r\n spike_time = ind2time(ind, ts)\r\n\r\n return(num_spikes, spike_time, spike_heights)\r\n \r\ndef spk_bin(spk_times, stim=[0,2000], num_bins=40, cumulative=True):\r\n \"\"\"\r\n Binning spikes given spike times\r\n * spk_times: time of spikes\r\n * stim: stimulus duration, or the window of which the spikes are binned from\r\n * num_bins: number of bins given the duration of the window\r\n * cumulative: cumulatively sum the spike count so far, default True\r\n \"\"\"\r\n time_bins = np.linspace(0, stim[1]-stim[0], num_bins+1)\r\n time_bins = np.c_[time_bins[:-1], time_bins[1:]]\r\n time_bins = np.c_[time_bins.mean(axis=1), time_bins]\r\n \r\n spike_bins = np.zeros(time_bins.shape[0])\r\n spk_times = spk_times - stim[0]\r\n \r\n for n, t in enumerate(time_bins):\r\n spike_bins[n] = np.logical_and(spk_times >= time_bins[n, 1], spk_times < time_bins[n, 2]).sum(dtype=int)\r\n \r\n if cumulative:\r\n spike_bins = np.cumsum(spike_bins, dtype=int)\r\n \r\n return spike_bins, time_bins\r\n\r\ndef spk_filter(Vs, ts, Wn, N=6, btype='bandpass'):\r\n \"\"\"Filter time series\r\n Vs: time series\r\n ts: sampling rate in [ms]. IMPORTANT: 'ts' must be millisecond\r\n Wn: cutoff frequency of the filter\r\n N: order of filter. Default 6.\r\n \"\"\"\r\n Nq = 1.0/ts*1000/2\r\n #wp, ws = np.array(wp)/Nq, np.array(ws)/Nq\r\n #N, Wn = sg.buttord(wp, ws, gpass=gp, gstop=gs, analog=False)\r\n Wn = np.array(Wn) / Nq\r\n b, a = sg.butter(N, Wn, btype=btype, analog=False, output='ba')\r\n Vs_mean = np.mean(Vs)\r\n Vs -= Vs_mean\r\n l = len(Vs)\r\n pad = 2**nextpow2(l)\r\n if (pad - l) < (0.1*l):\r\n pad = 2**(nextpow2(l)+1)\r\n pad = pad - l # legnth of padding\r\n Vs = np.concatenate((Vs, np.zeros(1,pad)))\r\n Vs = sg.filtfilt(b, a, Vs, axis=-1, padtype=None, padlen=None)\r\n Vs = Vs[0:l] + Vs_mean\r\n return(Vs)\r\n\r\n\r\ndef spk_dirac(ts=1., dur=1, phi=0., h=1., collapse=True):\r\n \"\"\"Make (summed) Dirac Delta function\r\n delta = spk_dirac(ts=1., dur=1., phi=0., h=1., collapse=True)\r\n Inputs:\r\n ts: sampling rate in seconds. Default is 1 second.\r\n dur: duration of the time series in seconds.\r\n ~Input a single value so that the time window is twice of this\r\n input, centered at 0. e.g. dur = 5 --> [-5, 5] time window\r\n ~Alternatively, input as a time window in seconds, e.g.\r\n dur = [-2, 5].\r\n ~If no input, default [-1, 1] window.\r\n phi: phase shift [seconds] of the Delta function. Default is 0 as in\r\n classic Dirac delta function. Input as a vector to return one\r\n Delta function for each phi.\r\n h: Height of the singularity (origin) where non-zero value occur.\r\n This can be either a scalar or an array of the same size as phi.\r\n Deafult heigth is 1.\r\n collapse: [true|false] collaspe Dirac function with different phase\r\n shifts by adding across phi (columns). Default is true.\r\n\r\n Output:\r\n delta: if not collpased, returns a matrix with row corresponding to\r\n time and columns corresponding to different phi; if collapsed,\r\n only a column vector is returned.\r\n\r\n Example usage: eph_dirac(1, [-5,5],[-2,-3,1,3],1,true).\r\n Returns a vector [0;0;1;1;0;0;1;0;1;0;0];\r\n \"\"\"\r\n if np.isscalar(dur):\r\n dur = [-dur, dur]\r\n\r\n phi_ind = time2ind(phi, ts, dur[0])\r\n if collapse:\r\n # initialize the vector\r\n delta = np.zeros(int(1+np.diff(time2ind(dur, ts))))\r\n delta[phi_ind] = h\r\n else:\r\n # initialize a matrix\r\n delta = np.zeros((int(1+np.diff(time2ind(dur, ts))), np.size(phi)))\r\n for p, n in enumerate(phi_ind):\r\n delta[p, n] = h\r\n\r\n return(delta)\r\n\r\ndef spk_firing_rate(Vs, ts, method='gaussian', debug=False, sigma=300., n=5,\r\n window=500.):\r\n \"\"\"Estimate a continuous, time varying firing rate\r\n R = spk_firing_rate(Vs, ts, method, ...)\r\n Inputs:\r\n Vs: voltage time series, N x M matrix with N time points and M trials\r\n in units of [mV]\r\n ts: sampling rate [ms]\r\n method: method of calculating firing rate of a single trial. Default is\r\n 'gaussian'. Available options are the following:\r\n 1). 'gaussian': specify a Gaussian moving kernel to calculate firing\r\n rate. (..., 'gaussian', sigma=0.3., n=5),\r\n where sigma is the standard deviation (default is 0.3s)\r\n and n is the number of standard deviations of Gaussian\r\n kernel to use to convolve the data (default 5).\r\n 2). 'rect': sepcify a rectangular moving kernel to calculate firing\r\n rate. The default setting is (..., 'rect', window=0.5),\r\n which specifies a moving kernel of 500ms.\r\n debug: turn on debug to print spike detection message.\r\n Outputs:\r\n R: time series of the same dimension as Vs, containing calculated\r\n firing rate in units of [Hz]\r\n\r\n A note for the unit: if ts, sigma, and window are in ms, the returned\r\n firing rate will be in MHz, instead of Hz.\r\n\r\n Implementation based on review by:\r\n Cunningham, J.P., Gilja, V., Ryu, S.I., Shenoy, K.V. Methods for\r\n Estimating Neural Firing Rates, and Their Application to Brain-Machine\r\n Interfaces. Neural Network. 22(9): 1235-1246 (2009).\r\n \"\"\"\r\n # Detect spikes first\r\n _, t_spk, _ = spk_count(Vs, ts)\r\n assert t_spk is not None and len(t_spk)>0, \"No spikes detected\"\r\n if debug is True:\r\n print(\"Detected %d spikes\\n\"%(len(t_spk)))\r\n t_window = ind2time([0, len(Vs)-1], ts)\r\n\r\n # Make dirac function based on spike times\r\n R = spk_dirac(ts, t_window, t_spk, 1., True)\r\n\r\n # Switch between selection of convolution functions\r\n if method == 'gaussian':\r\n w = stationary_gaussian_kernel(ts, sigma=sigma, n=n)\r\n mtype = 'ks' # kernel smoothing (stationay)\r\n elif method == 'rect':\r\n w = stationary_rect_kernel(ts, window=window)\r\n mtype = 'ks' # kernel smoothing (stationary)\r\n else:\r\n raise(NotImplementedError('%s kernel method is not implemented'\\\r\n %(method)))\r\n\r\n if mtype == 'ks': # kernel smoothing (stationary)\r\n # Convolve to get the firing rate\r\n R = np.convolve(R, w, mode='same')\r\n\r\n # convert from MHz to Hz\r\n R *= 1000.\r\n\r\n return(R)\r\n\r\ndef stationary_gaussian_kernel(ts, sigma=300., n=5):\r\n \"\"\"Make gaussian kernel centered at 0\r\n ts: sampling rate [ms]\r\n n: use n standard deviations below and above 0 (mean).\r\n sigma: standard deviation (width of Gaussian kernel) [ms].\r\n During Up state, sigma = 10ms according to:\r\n Neske, G.T., Patrick, S.L., Connor, B.W. Contributions of\r\n Diverse Excitatory and Inhibitory Neurons to Recurrent Network\r\n Activity in Cerebral Cortex. The Journal of Nueroscience.\r\n 35(3): 1089-1105 (2015). But this sd size may be too small for\r\n other processes. So default is set to 300ms for a smoother\r\n firing rate curve.\r\n \"\"\"\r\n t = np.arange(-n*sigma, n*sigma+ts, ts)\r\n w = 1./(np.sqrt(2.*np.pi)*sigma)*np.exp(-t**2/(2.*sigma**2))\r\n return(w)\r\n\r\n\r\ndef stionary_rect_kernel(ts, window=500.):\r\n \"\"\"Make rectangular kernel\r\n ts: sampling rate [ms]\r\n window:window length in [ms]\r\n \"\"\"\r\n # boxcar function\r\n t = time2ind(window, ts)\r\n w = np.concatenate((np.zeros(10), np.ones(t), np.zeros(10)))\r\n return(w)\r\n\r\n\r\ndef detectPSP_template_matching(Vs, ts, event, w=200, tau_RISE=1, tau_DECAY=4, mph=0.5, mpd=1, step=1, criterion='se', thresh=3):\r\n \"\"\"Intracellular post synaptic potential event detection based on\r\n Jonas et al, 1993: Quantal components of unitary EPSCs at the mossy fibre synapse on CA3 pyramidal cells of rat hippocampus.\r\n Clements and Bekkers, 1997: Detection of spontaneous synaptic events with an optimally scaled template.\r\n Cited by Guzman et al., 2014: Stimfit: quantifying electrophysiological data with Python\r\n Inputs:\r\n * Vs: voltage or current time series\r\n * ts: sampling rate [ms]\r\n * event: event type\r\n * w: window of the template [ms]\r\n * tau_RISE: rise time of the template gamma function [ms]. Default 1ms\r\n * tau_DECAY: decay time of the template gamma function [ms]. Default 4ms\r\n * mph: minimum event size [mV]. Default 0.5mV\r\n * mpd: minimum event distance [ms]. Default 1ms\r\n * step: step size to match template. Default is 1ms\r\n * criterion: ['se'|'corr']\r\n 'se': standard error [Default]\r\n 'corr': correlation\r\n * thresh: threshold applied on detection criterion to detect the event.\r\n This value depends on the criterion selected.\r\n 'se': which is the default setting. Recommend 3 [Default]\r\n 'corr': Recommend 0.95 (significance level of the correlation)\r\n\r\n \"\"\"\r\n step = step/ts\r\n t_vect = np.arange(0,w+ts,ts)\r\n def p_t(t, tau_RISE, tau_DECAY): # Template function. Upward PSP\r\n g = (1.0 - np.exp(-t/tau_RISE)) * np.exp(-t/tau_DECAY)\r\n g = g / np.max(g) # normalize the peak\r\n return g\r\n\r\n p = p_t(t_vect, tau_RISE, tau_DECAY)\r\n\r\n # Do some preprocessing first\r\n r_mean = np.mean(Vs)\r\n r = Vs - r_mean\r\n r = np.concatenate((r, np.zeros_like(p)))\r\n # length of trasnversion\r\n h = len(p)\r\n # Append some zeros\r\n chi_sq = np.zeros((np.arange(0, len(Vs), step).size,4)) # store fitting results\r\n A = np.vstack([p, np.ones(h)]).T\r\n for n, k in enumerate(np.arange(0, len(Vs), step, dtype=int)): # has total l steps\r\n chi_sq[n,0] = int(k) # record index\r\n r_t = r[int(k):int(k+h)]\r\n q = np.linalg.lstsq(A, r_t)\r\n m, c = q[0] # m=scale, c=offset\r\n chi_sq[n, 1:3] = q[0]\r\n if criterion=='se':\r\n chi_sq[n,3] = float(q[1]) # sum squared residual\r\n elif criterion == 'corr':\r\n chi_sq[n,3] = np.corrcoef(r_t, m*p+c)[0,1]\r\n\r\n if criterion=='se':\r\n DetectionCriterion = chi_sq[:,1] / (np.sqrt(chi_sq[:,3]/(h-1)))\r\n if event in ['IPSP', 'EPSC']:\r\n DetectionCriterion = -1.0 * DetectionCriterion\r\n elif criterion=='corr':\r\n DetectionCriterion = chi_sq[:,3]\r\n DetectionCriterion = DetectionCriterion / np.sqrt((1-DetectionCriterion**2) /(h-2)) # t value\r\n DetectionCriterion = 1-st.t.sf(np.abs(DetectionCriterion), h-1)\r\n\r\n # Run through general peak detection on the detection criterion trace\r\n ind, _ = findpeaks(DetectionCriterion, mph=thresh, mpd=mpd/ts)\r\n\r\n pks = chi_sq[ind,1]\r\n ind = chi_sq[ind,0]\r\n # Filter out the detected events that is less than the minimum peak height requirement\r\n if event in ['EPSP', 'IPSC']:\r\n valid_ind = pks>abs(mph)\r\n else:\r\n valid_ind = pks<-1*abs(mph)\r\n\r\n pks = pks[valid_ind]\r\n ind = ind[valid_ind]\r\n event_time = ind2time(ind, ts)\r\n\r\n\r\n return event_time, pks, DetectionCriterion, chi_sq\r\n\r\n\r\ndef detectPSP_deconvolution():\r\n return\r\n\r\ndef detrending(Vs, ts, mode='linear'):\r\n \"\"\"Detrend the data. Useful for calculating mean independent noise.\r\n mode:\r\n 'mean': simply remove mean\r\n 'linear' (Deafult),'nearest', 'zero', 'slinear', 'quadratic', 'cubic': using interp1d\r\n 'polyN': fit a polynomial for Nth degree. e.g. 'poly3' fits a cubic curve\r\n Do not mistake 'linear' mode as removing a global linear trend. For removing global linear trend,\r\n use 'poly1'\r\n\r\n Note that after detrending the mean would be zero. To keep the mean of the\r\n trace, remove mean before detrending, then add mean back.\r\n \"\"\"\r\n if mode=='mean':\r\n return Vs - np.mean(Vs)\r\n else:\r\n x = np.arange(0, len(Vs)*ts, ts)\r\n if mode in ['linear','nearest', 'zero', 'slinear', 'quadratic', 'cubic']:\r\n p = interp1d(x, Vs, kind=mode)\r\n elif mode[:4]=='poly':\r\n deg = str2num(mode[4:])\r\n p = np.poly1d(np.polyfit(x, Vs, deg))\r\n\r\n y_hat = p(x)\r\n\r\n return Vs - y_hat\r\n\r\ndef detectSpikes_cell_attached(Is, ts, msh=30, msd=10, basefilt=20, maxsh=300,\r\n removebase=False, **kwargs):\r\n \"\"\"Detect cell attached extracellular spikes\r\n Is: current time series\r\n ts: sampling rate (ms)\r\n msh: min spike height (Default 30pA)\r\n msd: min spike distance (Default 10ms)\r\n basefilt: baseline medfilt filter order in ms (Default 20)\r\n maxsh: maximum spike height. Helpful to remove stimulation artifacts.\r\n (Default 300)\r\n removebase: remove baseline when returning height. This will result\r\n absolute height of spike relative to the baseline. If set to false,\r\n returning the value of the spike, before filtering. (Default False)\r\n **kwargs: additional arguments for \"findpeaks\"\r\n \"\"\"\r\n # Make sure medfilt kernel size is odd\r\n # Median filter out the spikes to get baseline\r\n Base = medfilt1(Is, int(basefilt/ts/2)*2+1) # Use custom medfilt1\r\n msd = msd / ts\r\n Is = Is - Base\r\n # Invert Is because of voltage clamp mode, resulting inward current being\r\n # negative.\r\n [LOCS, PKS] = findpeaks(-Is, mph=msh, mpd=msd, **kwargs)\r\n num_spikes = len(PKS)\r\n spike_time = ind2time(LOCS, ts)\r\n # Remove peaks exceeding max height\r\n ind = np.where(PKS<maxsh)\r\n LOCS = LOCS[ind]\r\n PKS = PKS[ind]\r\n if removebase:\r\n spike_heights = PKS\r\n else:\r\n spike_heights = -PKS + Base[LOCS]\r\n return num_spikes, spike_time, spike_heights\r\n\r\n\r\ndef spk_vclamp_access_resistance(Is, Vs, ts, window=[995,1015], scalefactor=1.0, direction='up'):\r\n \"\"\"Calculate the access resistance based on capacitance artifact\r\n * Inputs:\r\n - Is: current time series (pA)\r\n - Vs: voltage step time series (mV)\r\n - ts: sampling rate (ms)\r\n - window: a window that contains the capcitance artifact, [baseline, end_of_artifact]\r\n - scalefactor: scale factor of the current time series\r\n - direction [\"up\"(default)|\"down\"]: is the artifact upswing or downswing\r\n - printResults: whether or not print the output\r\n * Outputs:\r\n - R_series: series resistance [MOhm]\r\n - tau: time constant of the exponential fit on the artifact [ms]\r\n - rsquare: adjusted R square of exponential fit on the artifact\r\n \"\"\"\r\n if window is not None:\r\n Is = spk_window(Is, ts, window)\r\n Vs = spk_window(Vs, ts, window)\r\n \r\n if direction != 'up':\r\n Is = -Is\r\n Vs = -Vs\r\n\r\n index = np.argmax(Is)\r\n Is_fit = Is[index:]\r\n Is_fit = Is_fit - np.mean(Is_fit[-5:])\r\n Ts_fit = np.arange(0, len(Is_fit)*ts, ts)\r\n \r\n # plt.plot(Ts_fit, Is_fit)\r\n # Fitting the best possible\r\n f0 = lambda x, a, b: a*np.exp(-b*x)\r\n popt1, pcov1 = curve_fit(f0, Ts_fit, Is_fit, [np.max(Is_fit), 0.5])\r\n gof1 = goodness_of_fit(Ts_fit, Is_fit, popt1, pcov1, f0)\r\n #print(gof1['adjrsquare'])\r\n #return Ts_fit, Is_fit, popt1\r\n\r\n if gof1['adjrsquare'] > 0.85:\r\n tau = 1.0 / np.abs(popt1[1]) \r\n rsquare = gof1['adjrsquare']\r\n else:\r\n f0 = lambda x, a, b, c: a*np.exp(-b*x)+c\r\n a, b, c = fit_exp_with_offset(Ts_fit, Is_fit)\r\n pcov2 = 0\r\n gof2 = goodness_of_fit(Ts_fit, Is_fit, (a,b,c), pcov2, f0)\r\n if gof2['adjrsquare'] > 0.85:\r\n tau = 1.0 / np.abs(popt2[1])\r\n rsquare = gof2['adjrsquare']\r\n else: \r\n f0 = lambda x, a, b, c, d: a*np.exp(-b*x) + c*np.exp(d*x)\r\n popt3, pcov3 = curve_fit(f0, Ts_fit, Is_fit, [np.max(Is_fit), 0.5, np.min(Is_fit), 0.5])\r\n gof3 = goodness_of_fit(Ts_fit, Is_fit, popt3, pcov3, f0)\r\n tau = np.max(1.0/np.array([popt3[1], popt3[3]]))\r\n rsquare = gof3['adjrsquare']\r\n\r\n # Integrate the current over the window to get total charge\r\n Is = Is - np.mean(Is[-5:])\r\n Q = np.sum(Is[Is>0]) * ts / scalefactor\r\n \r\n C_m = Q / np.abs(Vs[-1] - Vs[0]) # [pF]\r\n R_series = tau / C_m * 1000 # [MOhm]\r\n \r\n return R_series, tau, rsquare\r\n\r\n\r\ndef spk_get_stim(Ss, ts, longest_row=True, decimals=0):\r\n \"\"\"Serve as an example on how to extract the strongest \r\n and longest stimulus given the stimulus trace\r\n \r\n Inputs:\r\n Ss: time series of stimulus trace\r\n ts: sampling rate [seconds]\r\n Returns [start, end, intensity]\r\n \"\"\"\r\n # Get the longest stimulus\r\n if longest_row:\r\n stim_amp = np.max(Ss)\r\n stim = np.where(Ss == stim_amp)[0]\r\n consec_index = getconsecutiveindex(stim)\r\n longest_row = np.argmax(np.diff(consec_index, axis=1))\r\n stim = stim[consec_index[longest_row, :]]\r\n stim = np.round(ind2time(stim, ts), decimals=decimals)\r\n stim = np.concatenate((stim, np.asarray([stim_amp])), axis=0)\r\n else: # can have multiple stims\r\n \r\n def _extract_stim(stim, consec_index):\r\n tmp_stim = np.zeros((consec_index.shape[0], consec_index.shape[1]+1))\r\n for r in range(consec_index.shape[0]):\r\n tmp_stim[r, :2] = np.round(ind2time(stim[consec_index[r,:]], ts), decimals=decimals)\r\n tmp_stim[r, 2] = np.round(Ss[stim[(consec_index[r,0]+1):(consec_index[r, 1]-1)]].mean())\r\n \r\n return tmp_stim\r\n \r\n pos_stim = np.where(Ss>0)[0]\r\n consec_index_pos = getconsecutiveindex(pos_stim)\r\n neg_stim = np.where(Ss<0)[0]\r\n consec_index_neg = getconsecutiveindex(neg_stim)\r\n pos_stim_a = _extract_stim(pos_stim, consec_index_pos)\r\n neg_stim_a = _extract_stim(neg_stim, consec_index_neg)\r\n stim = np.concatenate((pos_stim_a, neg_stim_a), axis=0)\r\n stim, _ = sortrows(stim, col=0)\r\n \r\n return stim\r\n\r\n\r\ndef spk_get_rin(Vs, ts, neg=[], Ss=None, base_win=[-100, 0], rin_win=[-100,0], base_func=np.mean, rin_func=np.mean, relative_rin_win_end=True):\r\n \"\"\"\r\n Vs: voltage [mV]\r\n ts: sampling interval [ms]\r\n neg: a window of the Rin negative step [start, end (,intensity)], either size 2 or 3. If size 2, Ss argument must be specified\r\n Ss: time series of the same length as Vs. Needed when len(neg)==2\r\n base_win: baseline window\r\n rin_win: Rin window\r\n base_func: function applied to the base_win to extract the number. Default np.mean\r\n rin_func: function applied to the rin_win. Default np.mean\r\n relative_rin_win_end: If True: base_win is relative to neg[0], and rin_win to neg[1]\r\n \"\"\"\r\n if len(neg) == 3:\r\n Rin =(rin_func(spk_window(Vs, ts, rin_win + neg[1])) - base_func(spk_window(Vs, ts, base_win + neg[0]))) / neg[2] * 1000\r\n elif len(neg) == 2: \r\n if Ss is None:\r\n raise(Exception(\"Stimulus intensity is not specified\"))\r\n \r\n Rin = (rin_func(spk_window(Vs, ts, rin_win + neg[1])) - base_func(spk_window(Vs, ts, base_win + neg[0]))) / \\\r\n (np.mean(spk_window(Ss, ts, rin_win + neg[1])) - np.mean(spk_window(Ss, ts, base_win + neg[0]))) * 1000\r\n else:\r\n raise(Exception(\"Length of neg must be at least 2\"))\r\n \r\n return Rin\r\n \r\n\r\n# %%\r\ndef spk_time_distance(spike_time, method=\"victor&purpura\", *args, **kwargs):\r\n if method == \"victor&purpura\":\r\n spkd_victor_and_purpura(tli, tlj, cost)\r\n return\r\n\r\ndef spkd_victor_and_purpura(tli, tlj, cost=0):\r\n \"\"\"Calculate the \"spike time\" distance (Victor & Purpura 1996) for a single\r\n cost between a pair of spike trains\r\n \r\n tli: vector of spike times for first spike train\r\n tlj: vector of spike times for second spike train\r\n cost: cost per unit time to move a spike\r\n \r\n Translated from origial MATLAB script by Daniel Reich\r\n http://www-users.med.cornell.edu/~jdvicto/spkdm.html\r\n \r\n Original MATALB script license:\r\n Copyright (c) 1999 by Daniel Reich and Jonathan Victor.\r\n Translated to Matlab by Daniel Reich from FORTRAN code by Jonathan Victor.\r\n \"\"\"\r\n nspi = len(tli)\r\n nspj = len(tlj)\r\n if cost == 0:\r\n return abs(nspi-nspj)\r\n elif cost == np.inf:\r\n return nspi+nspj\r\n \r\n scr = np.zeros((nspi+1, nspj+1))\r\n # Initialize margins with cost of adding a spike\r\n scr[:, 0] = np.arange(0, nspi+1, 1)\r\n scr[0, :] = np.arange(0, nspj+1, 1)\r\n if nspi and nspj: # if neither is zero\r\n for i in range(1, nspj+1):\r\n for j in range(1, nspj+1):\r\n # Finding the minimum of adding a spike, removing a spike, or moving a spike\r\n scr[i,j]=np.min([scr[i-1,j]+1, scr[i,j-1]+1, scr[i-1,j-1]+cost*np.abs(tli[i-1]-tlj[j-1])])\r\n\r\n return scr[nspi, nspj]\r\n\r\ndef spkd_victor_purpura_interval(tli, tlj, cost=0, tsamp=2000):\r\n \"\"\"Calculates distance between two spike trains in the\r\n spike interval metric by a continuum modification of the \r\n sellers algorithm\r\n \r\n Inputs:\r\n tli: vector of spike times for first spike train\r\n tlj: vector of spike times for second spike train\r\n cost: cost per unit time to move a spike\r\n tsamp: the length of the entire interval\r\n \r\n \"\"\"\r\n \r\n nspi = len(tli) # number of spike times in train 1\r\n nspj = len(tlj) # number of spike times in train 2\r\n \r\n ni = nspi + 1 # number of intervals in train 1\r\n nj = nspj + 1 # number fo intervals in train 2\r\n scr = np.zeros((ni+1, nj+1))\r\n \r\n # Define calculation for a cost of zero\r\n if cost == 0:\r\n d = np.abs(ni-nj)\r\n return d, scr\r\n \r\n # Initialize margins with cost of adding a spike\r\n scr[:, 0] = np.arange(0, ni+1, 1)\r\n scr[0, :] = np.arange(0, nj+1, 1)\r\n \r\n tli_diff = np.diff(tli)\r\n tlj_diff = np.diff(tlj)\r\n \r\n for i in range(0, ni):\r\n if i>0 and i<ni-1: # in the middle\r\n di = tli_diff[i-1]\r\n elif i==0 and i==ni-1: # ni == 1 at the beginning\r\n di = tsamp\r\n elif i==0 and i<ni-1: # ni > 1 at the beginning\r\n di = tli[i]\r\n else: # otherwise\r\n di = tsamp - tli[i-1]\r\n \r\n iend = i==0 or i==ni-1\r\n \r\n # Unrolled loop for j = 1\r\n # -----------------------\r\n if nj == 1:\r\n dj = tsamp\r\n else: # j < nj\r\n dj = tlj[0]\r\n \r\n if iend:\r\n dist = 0\r\n else: # jend\r\n dist = np.max([0, dj-di])\r\n \r\n scr[i+1, 1] = np.min([scr[i,1]+1, scr[i+1, 0]+1, scr[i,0]+cost*dist])\r\n \r\n # Main code\r\n # -----------------------\r\n for j in range(1, nj-1):\r\n dj = tlj_diff[j-1]\r\n \r\n if iend:\r\n dist = np.max([0, di-dj])\r\n else:\r\n dist = np.abs(di-dj)\r\n \r\n scr[i+1, j+1] = np.min([scr[i, j+1]+1, scr[i+1, j]+1, scr[i,j]+cost*dist])\r\n \r\n # Unrolled loop for j = nj\r\n if nj == 0:\r\n dj = tsamp\r\n else:\r\n dj = tsamp - tlj[nj-2]\r\n \r\n if iend:\r\n dist = 0\r\n else:\r\n dist = np.max([0, dj-di])\r\n \r\n scr[i+1, nj] = np.min([scr[i, nj]+1, scr[i+1, nj-1]+1, scr[i, nj-1]+cost*dist])\r\n \r\n return scr[ni, nj]\r\n\r\ndef spk_preliminary_analysis(zData, params=['RMP', 'AHP', 'Rin', \r\n 'NumSpikes','SpikeTime', \r\n 'NumPASpikes', 'PASpikeTime']):\r\n \"\"\"General purpose analysis, with default settings\r\n * zData: data loaded\r\n * params: ['RMP', 'AHP', 'Rin', 'NumSpikes', 'SpikeTime', 'NumPASpikes', 'PASpikeTime']\r\n \"\"\"\r\n \r\n ts = zData.Protocol.msPerPoint\r\n stim = spk_get_stim(zData.Stimulus['A'], ts)\r\n neg = spk_get_stim(-zData.Stimulus['A'], ts)\r\n neg[2] = -neg[2]\r\n \r\n outputs = {'stim':stim, 'neg':neg, 'ts':ts}\r\n \r\n if 'RMP' in params:\r\n outputs['RMP'] = spk_window(zData.Voltage['A'], ts, np.array([-100, 0])+stim[0]).mean()\r\n \r\n \r\n if 'AHP' in params:\r\n outputs['AHP'] = spk_window(zData.Voltage['A'], ts, np.array([50, 200])+stim[1]).mean() - \\\r\n spk_window(zData.Voltage['A'], ts, np.array([-100, 0])+stim[0]).mean()\r\n \r\n if 'Rin' in params:\r\n outputs['Rin'] = spk_get_rin(zData.Voltage['A'], ts, neg)\r\n \r\n if 'NumSpikes' in params or 'SpikeTime' in params:\r\n outputs['NumSpikes'], outputs['SpikeTime'], _ = spk_count(zData.Voltage['A'], ts, window=stim)\r\n \r\n if 'NumPASpikes' in params or 'PASpikeTime' in params:\r\n outputs['NumPASpikes'], outputs['PASpikeTime'], _ = spk_count(zData.Voltage['A'], ts, window=[stim[1], None])\r\n \r\n return outputs\r\n \r\n\r\n \r\n \r\n \r\n# %% Simultaions\r\ndef spk_make_epsp_train(event_time, duration=10000, ts=0.1, \r\n alpha_dict={'duration':1000, 'amplitude':150, 'tau1':50, 'tau2':100}): \r\n alpha_dict['ts'] = ts\r\n \r\n T = np.arange(0, duration+ts, ts)\r\n R = spk_dirac(ts=ts, dur=[0, duration], phi=event_time, h=1., collapse=True)\r\n epsp = alpha(**alpha_dict)\r\n epsp_train = sg.convolve(R, epsp, mode=\"full\")[:len(T)] # faster\r\n return epsp_train\r\n\r\n# %%\r\nif __name__ == '__main__':\r\n from ImportData import *\r\n from matplotlib import pyplot as plt\r\n \r\n Base = 'Neocortex N.30May16.S1.E67'\r\n Similar = 'Neocortex N.30May16.S1.E71'\r\n Similar2 = 'Neocortex N.30May16.S1.E58'\r\n Different = 'Neocortex N.30May16.S1.E64'\r\n Different2 = 'Neocortex N.30May16.S1.E68'\r\n \r\n def get_spike_num(path):\r\n zData = load_trace(path)\r\n ts = zData.Protocol.msPerPoint\r\n stim = spk_get_stim(zData.Stimulus['A'], ts)\r\n Vs = spk_window(zData.Voltage['A'], ts, np.array([0, 2000]) + stim[0])\r\n _, spk_time, _ = spk_count(Vs, ts)\r\n return spk_time, Vs\r\n \r\n \r\n Base_spk, _ = get_spike_num(Base)\r\n Similar_spk,_ = get_spike_num(Similar)\r\n Similar2_spk,Vs = get_spike_num(Similar2)\r\n Different_spk,_ = get_spike_num(Different)\r\n Different2_spk,_ = get_spike_num(Different)\r\n \r\n d = spkd_victor_and_purpura(Base_spk, Similar_spk, cost=0.1)\r\n \r\n d = spkd_victor_purpura_interval(Base_spk, Similar_spk, cost=0.1, tsamp=2000)\r\n \r\n\r\n"
},
{
"alpha_fraction": 0.5063702464103699,
"alphanum_fraction": 0.5157719254493713,
"avg_line_length": 43.3505973815918,
"blob_id": "662ac6e3d4ad43c75141de2c1c97b46c5c185398",
"content_id": "56d4f5b8a2d4d7dc2e014033fb49bcd258405fa8",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 11381,
"license_type": "no_license",
"max_line_length": 79,
"num_lines": 251,
"path": "/PySynapse/util/inkscape_deep_ungroup_all.py",
"repo_name": "sapphire008/Python",
"src_encoding": "UTF-8",
"text": "# -*- coding: utf-8 -*-\r\n\"\"\"\r\nCreated on Thu Mar 9 15:56:00 2017\r\n\r\n@author: Edward\r\n\"\"\"\r\n\r\n#!/usr/bin/env python\r\n# -*- coding: utf-8 -*-\r\n\"\"\"\r\nsee #inkscape on Freenode and\r\nhttps://github.com/nikitakit/svg2sif/blob/master/synfig_prepare.py#L370\r\nfor an example how to do the transform of parent to children.\r\n\"\"\"\r\n__version__ = \"0.2\" # Works but in terms of maturity, still unsure\r\nsys.path.append(\"D:/Edward/Software/inkscape-0.91-1-win64/share/extensions\")\r\n#inkscape_path='\"C:\\\\Program Files\\\\Inkscape\\\\inkscape.exe\"'\r\n\r\nfrom inkex import addNS\r\nimport logging\r\nimport simplestyle\r\nimport simpletransform\r\nlogging.basicConfig(format='%(levelname)s:%(funcName)s:%(message)s',\r\n level=logging.INFO)\r\ntry:\r\n import inkex\r\nexcept ImportError:\r\n raise ImportError(\"\"\"No module named inkex in {0}.\"\"\".format(__file__))\r\ntry:\r\n from numpy import matrix\r\nexcept:\r\n raise ImportError(\"\"\"Cannot find numpy.matrix in {0}.\"\"\".format(__file__))\r\nSVG_NS = \"http://www.w3.org/2000/svg\"\r\nINKSCAPE_NS = \"http://www.inkscape.org/namespaces/inkscape\"\r\nclass Ungroup(inkex.Effect):\r\n def __init__(self):\r\n inkex.Effect.__init__(self)\r\n self.OptionParser.add_option(\"-s\", \"--startdepth\",\r\n action=\"store\", type=\"int\",\r\n dest=\"startdepth\", default=0,\r\n help=\"starting depth for ungrouping\")\r\n self.OptionParser.add_option(\"-m\", \"--maxdepth\",\r\n action=\"store\", type=\"int\",\r\n dest=\"maxdepth\", default=65535,\r\n help=\"maximum ungrouping depth\")\r\n self.OptionParser.add_option(\"-k\", \"--keepdepth\",\r\n action=\"store\", type=\"int\",\r\n dest=\"keepdepth\", default=0,\r\n help=\"levels of ungrouping to \" +\r\n \"leave untouched\")\r\n def _get_dimension(s=\"1024\"):\r\n \"\"\"Convert an SVG length string from arbitrary units to pixels\"\"\"\r\n if s == \"\":\r\n return 0\r\n try:\r\n last = int(s[-1])\r\n except:\r\n last = None\r\n if type(last) == int:\r\n return float(s)\r\n elif s[-1] == \"%\":\r\n return 1024\r\n elif s[-2:] == \"px\":\r\n return float(s[:-2])\r\n elif s[-2:] == \"pt\":\r\n return float(s[:-2]) * 1.25\r\n elif s[-2:] == \"em\":\r\n return float(s[:-2]) * 16\r\n elif s[-2:] == \"mm\":\r\n return float(s[:-2]) * 3.54\r\n elif s[-2:] == \"pc\":\r\n return float(s[:-2]) * 15\r\n elif s[-2:] == \"cm\":\r\n return float(s[:-2]) * 35.43\r\n elif s[-2:] == \"in\":\r\n return float(s[:-2]) * 90\r\n else:\r\n return 1024\r\n def _merge_transform(self, node, transform):\r\n \"\"\"Propagate style and transform to remove inheritance\r\n Originally from\r\n https://github.com/nikitakit/svg2sif/blob/master/synfig_prepare.py#L370\r\n \"\"\"\r\n # Compose the transformations\r\n if node.tag == addNS(\"svg\", \"svg\") and node.get(\"viewBox\"):\r\n vx, vy, vw, vh = [self._get_dimension(x)\r\n for x in node.get(\"viewBox\").split()]\r\n dw = self._get_dimension(node.get(\"width\", vw))\r\n dh = self._get_dimension(node.get(\"height\", vh))\r\n t = (\"translate(%f, %f) scale(%f, %f)\" %\r\n (-vx, -vy, dw / vw, dh / vh))\r\n this_transform = simpletransform.parseTransform(\r\n t, transform)\r\n this_transform = simpletransform.parseTransform(\r\n node.get(\"transform\"), this_transform)\r\n del node.attrib[\"viewBox\"]\r\n else:\r\n this_transform = simpletransform.parseTransform(node.get(\r\n \"transform\"), transform)\r\n # Set the node's transform attrib\r\n node.set(\"transform\",\r\n simpletransform.formatTransform(this_transform))\r\n def _merge_style(self, node, style):\r\n \"\"\"Propagate style and transform to remove inheritance\r\n Originally from\r\n https://github.com/nikitakit/svg2sif/blob/master/synfig_prepare.py#L370\r\n \"\"\"\r\n # Compose the style attribs\r\n this_style = simplestyle.parseStyle(node.get(\"style\", \"\"))\r\n remaining_style = {} # Style attributes that are not propagated\r\n # Filters should remain on the top ancestor\r\n non_propagated = [\"filter\"]\r\n for key in non_propagated:\r\n if key in this_style.keys():\r\n remaining_style[key] = this_style[key]\r\n del this_style[key]\r\n # Create a copy of the parent style, and merge this style into it\r\n parent_style_copy = style.copy()\r\n parent_style_copy.update(this_style)\r\n this_style = parent_style_copy\r\n # Merge in any attributes outside of the style\r\n style_attribs = [\"fill\", \"stroke\"]\r\n for attrib in style_attribs:\r\n if node.get(attrib):\r\n this_style[attrib] = node.get(attrib)\r\n del node.attrib[attrib]\r\n if (node.tag == addNS(\"svg\", \"svg\")\r\n or node.tag == addNS(\"g\", \"svg\")\r\n or node.tag == addNS(\"a\", \"svg\")\r\n or node.tag == addNS(\"switch\", \"svg\")):\r\n # Leave only non-propagating style attributes\r\n if len(remaining_style) == 0:\r\n if \"style\" in node.keys():\r\n del node.attrib[\"style\"]\r\n else:\r\n node.set(\"style\", simplestyle.formatStyle(remaining_style))\r\n else:\r\n # This element is not a container\r\n # Merge remaining_style into this_style\r\n this_style.update(remaining_style)\r\n # Set the element's style attribs\r\n node.set(\"style\", simplestyle.formatStyle(this_style))\r\n def _merge_clippath(self, node, clippathurl):\r\n if (clippathurl):\r\n node_transform = simpletransform.parseTransform(\r\n node.get(\"transform\"))\r\n if (node_transform):\r\n # Clip-paths on nodes with a transform have the transform\r\n # applied to the clipPath as well, which we don't want. So, we\r\n # create new clipPath element with references to all existing\r\n # clippath subelements, but with the inverse transform applied\r\n inverse_node_transform = simpletransform.formatTransform(\r\n self._invert_transform(node_transform))\r\n new_clippath = inkex.etree.SubElement(\r\n self.xpathSingle('//svg:defs'), 'clipPath',\r\n {'clipPathUnits': 'userSpaceOnUse',\r\n 'id': self.uniqueId(\"clipPath\")})\r\n clippath = self.getElementById(clippathurl[5:-1])\r\n for c in (clippath.iterchildren()):\r\n inkex.etree.SubElement(\r\n new_clippath, 'use',\r\n {inkex.addNS('href', 'xlink'): '#' + c.get(\"id\"),\r\n 'transform': inverse_node_transform,\r\n 'id': self.uniqueId(\"use\")})\r\n # Set the clippathurl to be the one with the inverse transform\r\n clippathurl = \"url(#\" + new_clippath.get(\"id\") + \")\"\r\n # Reference the parent clip-path to keep clipping intersection\r\n # Find end of clip-path chain and add reference there\r\n node_clippathurl = node.get(\"clip-path\")\r\n while (node_clippathurl):\r\n node = self.getElementById(node_clippathurl[5:-1])\r\n node_clippathurl = node.get(\"clip-path\")\r\n node.set(\"clip-path\", clippathurl)\r\n def _invert_transform(self, transform):\r\n # duplicate list to avoid modifying it\r\n return matrix(transform + [[0, 0, 1]]).I.tolist()[0:2]\r\n # Flatten a group into same z-order as parent, propagating attribs\r\n def _ungroup(self, node):\r\n node_parent = node.getparent()\r\n node_index = list(node_parent).index(node)\r\n node_style = simplestyle.parseStyle(node.get(\"style\"))\r\n node_transform = simpletransform.parseTransform(node.get(\"transform\"))\r\n node_clippathurl = node.get('clip-path')\r\n for c in reversed(list(node)):\r\n self._merge_transform(c, node_transform)\r\n self._merge_style(c, node_style)\r\n self._merge_clippath(c, node_clippathurl)\r\n node_parent.insert(node_index, c)\r\n node_parent.remove(node)\r\n # Put all ungrouping restrictions here\r\n def _want_ungroup(self, node, depth, height):\r\n if (node.tag == addNS(\"g\", \"svg\") and\r\n node.getparent() is not None and\r\n height > self.options.keepdepth and\r\n depth >= self.options.startdepth and\r\n depth <= self.options.maxdepth):\r\n return True\r\n return False\r\n def _deep_ungroup(self, node):\r\n # using iteration instead of recursion to avoid hitting Python\r\n # max recursion depth limits, which is a problem in converted PDFs\r\n # Seed the queue (stack) with initial node\r\n q = [{'node': node,\r\n 'depth': 0,\r\n 'prev': {'height': None},\r\n 'height': None}]\r\n while q:\r\n current = q[-1]\r\n node = current['node']\r\n depth = current['depth']\r\n height = current['height']\r\n # Recursion path\r\n if (height is None):\r\n # Don't enter non-graphical portions of the document\r\n if (node.tag == addNS(\"namedview\", \"sodipodi\")\r\n or node.tag == addNS(\"defs\", \"svg\")\r\n or node.tag == addNS(\"metadata\", \"svg\")\r\n or node.tag == addNS(\"foreignObject\", \"svg\")):\r\n q.pop()\r\n # Base case: Leaf node\r\n if (node.tag != addNS(\"g\", \"svg\") or not len(node)):\r\n current['height'] = 0\r\n # Recursive case: Group element with children\r\n else:\r\n depth += 1\r\n for c in node.iterchildren():\r\n q.append({'node': c, 'prev': current,\r\n 'depth': depth, 'height': None})\r\n # Return path\r\n else:\r\n # Ungroup if desired\r\n if (self._want_ungroup(node, depth, height)):\r\n self._ungroup(node)\r\n # Propagate (max) height up the call chain\r\n height += 1\r\n previous = current['prev']\r\n prev_height = previous['height']\r\n if (prev_height is None or prev_height < height):\r\n previous['height'] = height\r\n # Only process each node once\r\n q.pop()\r\n def effect(self):\r\n if len(self.selected):\r\n for elem in self.selected.itervalues():\r\n self._deep_ungroup(elem)\r\n else:\r\n for elem in self.document.getroot():\r\n self._deep_ungroup(elem)\r\nif __name__ == '__main__':\r\n effect = Ungroup()\r\n effect.affect()"
},
{
"alpha_fraction": 0.7233766317367554,
"alphanum_fraction": 0.7402597665786743,
"avg_line_length": 43.411766052246094,
"blob_id": "7858ba4c871c34e57110a7dc02f66c35487f136c",
"content_id": "ecf2bd4a129a82930d1f0135b4430b1d2641edbc",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 770,
"license_type": "no_license",
"max_line_length": 89,
"num_lines": 17,
"path": "/python_tutorials/ThinkPython/practice_notes_3_case_study.py",
"repo_name": "sapphire008/Python",
"src_encoding": "UTF-8",
"text": "# Python 3.3.0 Practice Notes\r\n# Day 3: November 25, 2012\r\n# Case Study Script\r\n\r\nopen_file = open('words.txt');#create a file handle for the text file\r\nopen_file.readline();#should return the first line in the document\r\n#>>>'aa\\n'\r\n#where \\n is the new line delimiter in Windows\r\n#in Linux, it should be \\r\\n\r\nopen_file.readline();#a second time calling .readline() method should read the next line\r\nreadLine=open_file.readline();#store the read line in a variable\r\n#readWord=readLine.strip();#this should strip the annoying \\n delimiter in Python 2\r\nprint(readWord);\r\n#However, in Python 3, it looks like once .readline is stored in a variable\r\n#it automatically strip the delimiter, thus .strip is not available / useless in Python 3\r\n\r\n#This concludes the case study"
},
{
"alpha_fraction": 0.5493642687797546,
"alphanum_fraction": 0.5646970868110657,
"avg_line_length": 26.5670108795166,
"blob_id": "672cd0c58bd19647ec7166da11993e2c61bbdc07",
"content_id": "243bb005f50a463080f2748beea49da19170f8ec",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 2674,
"license_type": "no_license",
"max_line_length": 84,
"num_lines": 97,
"path": "/Spikes/spikedetekt2/spikedetekt2/core/progressbar.py",
"repo_name": "sapphire008/Python",
"src_encoding": "UTF-8",
"text": "'''\nThis file is used to report the amount of time elapsed, and how much remains.\n\nUsed like this::\n\n progress = ProgressReporter()\n progress.start()\n for ...:\n ... do some work ...\n progress.update(percentcomplete)\n progress.finish()\n\nIt will give a progress report every 60 seconds by default.\n \nAdapted and simplified from progressreporting.py in the Brian neural network\nsimulator (http://briansimulator.org).\n'''\nimport sys, time\nfrom kwiklib import info\n\n__all__ = ['ProgressReporter']\n\ndef time_rep(t):\n '''\n Textual representation of time t given in seconds\n '''\n t = int(t)\n if t < 60:\n return str(t) + 's'\n secs = t % 60\n mins = t // 60\n if mins < 60:\n return str(mins) + 'm ' + str(secs) + 's'\n mins = mins % 60\n hours = t // (60 * 60)\n if hours < 24:\n return str(hours) + 'h ' + str(mins) + 'm ' + str(secs) + 's'\n hours = hours % 24\n days = t // (60 * 60 * 24)\n return str(days) + 'd ' + str(hours) + 'h ' + str(mins) + 'm ' + str(secs) + 's'\n\ndef make_text_report(elapsed, complete):\n s = str(int(100 * complete)) + '% complete, '\n s += time_rep(elapsed) + ' elapsed'\n if complete > .001:\n remtime = elapsed / complete - elapsed\n s += ', ' + time_rep(remtime) + ' remaining.'\n else:\n s += '.'\n return s\n\nclass ProgressReporter(object):\n '''\n Standard text progress reports\n \n Initialised with an optional argument:\n \n ``period``\n How often reports should be generated in seconds.\n \n Methods:\n \n .. method:: start()\n \n Call at the beginning of a task to start timing it.\n \n .. method:: finish()\n \n Call at the end of a task to finish timing it.\n \n .. method:: update(complete[, extrainfo])\n \n Call with the fraction of the task completed, between 0 and 1, and\n the optional extrainfo parameters is a string giving extra information\n about the progress. \n '''\n def __init__(self, period=60.0):\n self.period = float(period)\n self.start()\n\n def start(self):\n self.start_time = time.time()\n self.next_report_time = self.start_time\n\n def finish(self):\n self.update(1)\n\n def update(self, complete, extrainfo=''):\n cur_time = time.time()\n if (cur_time > self.next_report_time) or \\\n complete == 1.0 or complete == 1:\n self.next_report_time = cur_time + self.period\n elapsed = time.time() - self.start_time\n s = make_text_report(elapsed, complete)\n if extrainfo:\n s += ' '+extrainfo\n info(s)\n"
},
{
"alpha_fraction": 0.52193284034729,
"alphanum_fraction": 0.535272479057312,
"avg_line_length": 42.77447509765625,
"blob_id": "1ae1ec038f6486204cf27daba80a04697cb9d656",
"content_id": "bd9979941b3603d06d4fd227499472bd79aa09d4",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 36133,
"license_type": "no_license",
"max_line_length": 178,
"num_lines": 807,
"path": "/PySynapse/util/ExportData.py",
"repo_name": "sapphire008/Python",
"src_encoding": "UTF-8",
"text": "# -*- coding: utf-8 -*-\r\n\"\"\"\r\nCreated on Sun Mar 20 09:55:23 2016\r\n\r\nData export utility of Synapse\r\n\r\n@author: Edward\r\n\"\"\"\r\n\r\nimport sys\r\nimport os\r\nimport warnings\r\nwarnings.filterwarnings(\"ignore\")\r\n\r\nimport numpy as np\r\nfrom scipy.signal import butter, filtfilt\r\nimport matplotlib\r\nmatplotlib.use(\"PS\")\r\nmatplotlib.rcParams['svg.fonttype'] = 'none'\r\nimport matplotlib.pyplot as plt\r\nimport matplotlib.font_manager as fm\r\nfrom matplotlib.offsetbox import AnchoredOffsetbox, TextArea, HPacker, VPacker, AuxTransformBox\r\nimport matplotlib.ticker as tic\r\n\r\n# matplotlib.rcParams['pdf.fonttype'] = 42\r\n# matplotlib.rcParams['ps.fonttype'] = 42\r\n\r\nfrom pdb import set_trace\r\n\r\n__location__ = os.path.realpath(os.path.join(os.getcwd(), os.path.dirname(__file__)))\r\nsys.path.append(os.path.join(__location__, '..')) # for debug only\r\nfrom util.spk_util import *\r\nfrom util.svg2eps import *\r\n\r\n# %%\r\n# Helper functions\r\ndef SetFont(ax, fig, fontsize=12,fontname='Arial',items=None):\r\n \"\"\"Change font properties of all axes\r\n ax: which axis or axes to change the font. Default all axis in current\r\n instance. To skip axis, input as [].\r\n fig: figure handle to change the font (text in figure, not in axis).\r\n Default is any text items in current instance. To skip, input as [].\r\n fontsize: size of the font, specified in the global variable\r\n fontname: fullpath of the font, specified in the global variable\r\n items: select a list of items to change font. ['title', 'xlab','ylab',\r\n 'xtick','ytick', 'texts','legend','legendtitle','textartist']\r\n \"\"\" \r\n def unpack_anchor_offsetbox(box):\r\n \"\"\"Getting only text area items from the anchor offset box\"\"\"\r\n itemList = []\r\n counter = 0\r\n maxiter=100 # terminate at this iteration\r\n def unpacker(box):\r\n return box.get_children()\r\n\r\n # vectorize\r\n unpacker = np.frompyfunc(unpacker, 1,1)\r\n # Get the children\r\n while counter<maxiter and box:\r\n # recursively unpack the anchoroffsetbox or v/hpacker\r\n box = np.hstack(unpacker(box)).tolist()\r\n for nn, b in enumerate(box):\r\n if 'matplotlib.text.Text' in str(type(b)):\r\n itemList.append(b)\r\n box[nn] = None\r\n # remove recorded\r\n box = [b for b in box if b is not None]\r\n counter += 1\r\n\r\n return itemList\r\n\r\n def get_ax_items(ax):\r\n \"\"\"Parse axis items\"\"\"\r\n itemDict={'title':[ax.title], 'xlab':[ax.xaxis.label],\r\n 'ylab':[ax.yaxis.label], 'xtick':ax.get_xticklabels(),\r\n 'ytick':ax.get_yticklabels(),\r\n 'xminortick': ax.get_xminorticklabels(),\r\n 'yminortick': ax.get_yminorticklabels(),\r\n 'texts':ax.texts if isinstance(ax.texts,(np.ndarray,list))\r\n else [ax.texts],\r\n 'legend': [] if not ax.legend_\r\n else ax.legend_.get_texts(),\r\n 'legendtitle':[] if not ax.legend_\r\n else [ax.legend_.get_title()],\r\n 'textartist':[] if not ax.artists\r\n else unpack_anchor_offsetbox(ax.artists)}\r\n itemList, keyList = [], []\r\n if items is None: # get all items\r\n for k, v in iter(itemDict.items()):\r\n itemList += v\r\n keyList += [k]*len(v)\r\n else: # get only specified item\r\n for k in items:\r\n itemList += itemDict[k] # add only specified in items\r\n keyList += [k]*len(itemDict[k])\r\n\r\n return(itemList, keyList)\r\n\r\n def get_fig_items(fig):\r\n \"\"\"Parse figure text items\"\"\"\r\n itemList = fig.texts if isinstance(fig.texts,(np.ndarray,list)) \\\r\n else [fig.texts]\r\n keyList = ['texts'] * len(itemList)\r\n\r\n return(itemList, keyList)\r\n\r\n def CF(itemList, keyList):\r\n \"\"\"Change font given item\"\"\"\r\n # initialize fontprop object\r\n fontprop = fm.FontProperties(style='normal', weight='normal',\r\n stretch = 'normal')\r\n if os.path.isfile(fontname): # check if font is a file\r\n fontprop.set_file(fontname)\r\n else:# check if the name of font is available in the system\r\n if not fontname.lower() in [f.name.lower() for f in fm.fontManager.ttflist] and \\\r\n not fontname.lower() in [f.name.lower() for f in fm.fontManager.afmlist]:\r\n #any([fontname.lower() in a.lower() for a in\r\n #fm.findSystemFonts(fontpaths=None, fontext='ttf')]):\r\n print('Cannot find specified font: %s' %(fontname))\r\n fontprop.set_family(fontname) # set font name\r\n # set font for each object\r\n for n, item in enumerate(itemList):\r\n if isinstance(fontsize, dict):\r\n if keyList[n] in fontsize.keys():\r\n fontprop.set_size(fontsize[keyList[n]])\r\n elif n < 1: # set the properties only once\r\n fontprop.set_size(fontsize)\r\n item.set_fontproperties(fontprop) # change font for all items\r\n\r\n\r\n def CF_ax(ax): # combine CF and get_ax_items\r\n if not ax: # true when empty or None\r\n return # skip axis font change\r\n itemList, keyList = get_ax_items(ax)\r\n CF(itemList, keyList)\r\n\r\n def CF_fig(fig): # combine CF and get_fig_items\r\n if not fig: # true when empty or None\r\n return # skip figure font change\r\n itemsList, keyList = get_fig_items(fig)\r\n CF(itemsList, keyList)\r\n\r\n # vecotirze the closure\r\n CF_ax_vec = np.frompyfunc(CF_ax, 1,1)\r\n CF_fig_vec = np.frompyfunc(CF_fig, 1,1)\r\n\r\n # Do the actual font change\r\n CF_ax_vec(ax)\r\n CF_fig_vec(fig)\r\n\r\n\r\ndef AdjustAxs(otypes=[np.ndarray], excluded=None):\r\n \"\"\"Used as a decorator to set the axis properties\"\"\"\r\n def wrap(func):\r\n # vectorize the func so that it can be applied to single axis or\r\n # multiple axes\r\n func_vec = np.vectorize(func, otypes=otypes, excluded=excluded)\r\n def wrapper(ax, *args, **kwargs):\r\n res = func_vec(ax, *args, **kwargs)\r\n return(res)\r\n return(wrapper)\r\n return(wrap)\r\n\r\ndef SetAxisOrigin(ax, xcenter='origin', ycenter='origin', xspine='bottom', yspine='left'):\r\n \"\"\"Set the origin of the axis\"\"\"\r\n if xcenter == 'origin':\r\n xtick = ax.get_xticks()\r\n if max(xtick)<0:\r\n xcenter = max(xtick)\r\n elif min(xtick)>0:\r\n xcenter = min(xtick)\r\n else:\r\n xcenter = 0\r\n\r\n if ycenter == 'origin':\r\n ytick = ax.get_yticks()\r\n if max(ytick)<0:\r\n ycenter = max(ytick)\r\n elif min(ytick)>0:\r\n ycenter = min(ycenter)\r\n else:\r\n ycenter = 0\r\n\r\n xoffspine = 'top' if xspine == 'bottom' else 'bottom'\r\n yoffspine = 'right' if yspine=='left' else 'left'\r\n\r\n ax.spines[xspine].set_position(('data', ycenter))\r\n ax.spines[yspine].set_position(('data', xcenter))\r\n ax.spines[xoffspine].set_visible(False)\r\n ax.spines[yoffspine].set_visible(False)\r\n ax.xaxis.set_ticks_position(xspine)\r\n ax.yaxis.set_ticks_position(yspine)\r\n ax.spines[xspine].set_capstyle('butt')\r\n ax.spines[yspine].set_capstyle('butt')\r\n\r\ndef AdjustText(txt, ax=None):\r\n \"\"\"Adjust text so that it is not being cutoff\"\"\"\r\n #renderer = self.axs.get_renderer_cache()\r\n if ax is None: ax = plt.gca()\r\n txt.set_bbox(dict(facecolor='w', alpha=0, boxstyle='round, pad=1'))\r\n # plt.draw() # update the text draw\r\n txtbb = txt.get_bbox_patch().get_window_extent() # can specify render\r\n xmin, ymin, xmax, ymax = tuple(ax.transData.inverted().\r\n transform(txtbb).ravel())\r\n xbnd, ybnd = ax.get_xbound(), ax.get_ybound()\r\n if xmax > xbnd[-1]:\r\n ax.set_xbound(xbnd[0], xmax)\r\n if xmin < xbnd[0]:\r\n ax.set_xbound(xmin, xbnd[-1])\r\n if ymax > ybnd[-1]:\r\n ax.set_ybound(ybnd[0], ymax)\r\n if ymin < ybnd[0]:\r\n ax.set_ybound(ymin, ybnd[-1])\r\n \r\ndef roundto125(x, r=np.array([1,2,5,10])): # helper static function\r\n \"\"\"5ms, 10ms, 20ms, 50ms, 100ms, 200ms, 500ms, 1s, 2s, 5s, etc.\r\n 5mV, 10mV, 20mV, etc.\r\n 5pA, 10pA, 20pA, 50pA, etc.\r\n \"\"\"\r\n p = int(np.floor(np.log10(x))) # power of 10\r\n y = r[(np.abs(r-x/(10**p))).argmin()] # find closest value\r\n return(y*(10**p))\r\n\r\n\r\ndef butterFilter(y, Order, Wn, Btype=\"low\"):\r\n b, a = butter(Order, Wn, btype=Btype, analog=False, output='ba')\r\n y_filt = filtfilt(b, a, y)\r\n return y_filt\r\n\r\ndef AddTraceScaleBar(xunit, yunit, color='k',linewidth=None,\\\r\n fontsize=None, ax=None, xscale=None, yscale=None,\r\n loc=5, bbox_to_anchor=None):\r\n \"\"\"Add scale bar on trace. Specifically designed for voltage /\r\n current / stimulus vs. time traces.\r\n xscale, yscale: add the trace bar to the specified window of x and y.\r\n \"\"\"\r\n if ax is None: ax=plt.gca()\r\n def scalebarlabel(x, unitstr):\r\n x = int(x)\r\n if unitstr.lower()[0] == 'm':\r\n return(str(x)+\" \" + unitstr if x<1000 else str(int(x/1000))+ \" \" +\r\n unitstr.replace('m',''))\r\n elif unitstr.lower()[0] == 'p':\r\n return(str(x)+\" \"+ unitstr if x<1000 else str(int(x/1000))+ \" \" +\r\n unitstr.replace('p','n'))\r\n else: # no prefix\r\n return(str(x)+\" \" + unitstr)\r\n\r\n ax.set_axis_off() # turn off axis\r\n X = np.ptp(ax.get_xlim()) if xscale is None else xscale\r\n Y = np.ptp(ax.get_ylim()) if yscale is None else yscale\r\n # calculate scale bar unit length\r\n X, Y = roundto125(X/5), roundto125(Y/(5 if Y<1200 else 10))\r\n # Parse scale bar labels\r\n xlab, ylab = scalebarlabel(X, xunit), scalebarlabel(Y, yunit)\r\n # Get color of the scalebar\r\n if color is None:\r\n color = ax.get_lines()[0]\r\n if 'matplotlib.lines.Line2D' in str(type(color)):\r\n color = color.get_color()\r\n if linewidth is None:\r\n try:\r\n linewidth = ax.get_lines()[0]\r\n except:\r\n linewidth=0.70\r\n #raise(AttributeError('Did not find any line in this axis. Please explicitly specify the linewidth'))\r\n if 'matplotlib.lines.Line2D' in str(type(linewidth)):\r\n linewidth = linewidth.get_linewidth()\r\n # print(linewidth)\r\n if fontsize is None:\r\n fontsize = ax.yaxis.get_major_ticks()[2].label.get_fontsize()\r\n scalebarBox = AuxTransformBox(ax.transData)\r\n scalebarBox.add_artist(matplotlib.patches.Rectangle((0, 0), X, 0, fc=\"none\", edgecolor='k', linewidth=linewidth, joinstyle='miter', capstyle='projecting')) #TODO capstyle\r\n scalebarBox.add_artist(matplotlib.patches.Rectangle((X, 0), 0, Y, fc=\"none\", edgecolor='k', linewidth=linewidth, joinstyle='miter', capstyle='projecting'))\r\n scalebarBox.add_artist(matplotlib.text.Text(X/2, -Y/20, xlab, va='top', ha='center', color='k'))\r\n scalebarBox.add_artist(matplotlib.text.Text(X+X/20, Y/2, ylab, va='center', ha='left', color='k'))\r\n anchored_box = AnchoredOffsetbox(loc=loc, pad=-9, child=scalebarBox, frameon=False, bbox_to_anchor=bbox_to_anchor)\r\n ax.add_artist(anchored_box)\r\n return(anchored_box)\r\n\r\ndef DrawAnnotationArtists(artist_dict, axs):\r\n \"\"\"Draw the same annotation objects displayed on the graphics window \r\n when exporting to matplotlib figures\r\n * ann_dict: dictionaries of each artist\r\n \"\"\"\r\n # TODO\r\n for key, artist in artist_dict.items():\r\n # Find out which axis to draw on\r\n ax = axs[artist['layout'][2]]\r\n if isinstance(ax, list):\r\n ax = ax[artist['layout'][3]]\r\n if artist['type'] == 'box':\r\n mpl_artist = matplotlib.patches.Rectangle((artist['x0'], artist['y0']), artist['width'], artist['height'],\r\n ec=artist['linecolor'] if artist['line'] else 'none',\r\n linewidth=artist['linewidth'] if artist['line'] else None, linestyle=artist['linestyle'],\r\n fc=artist['fillcolor'],fill=artist['fill'],\r\n joinstyle='miter',capstyle='projecting')\r\n #set_trace()\r\n #mpl_artist = matplotlib.patches.Rectangle((100, 0), 500, 10)\r\n\r\n ax.add_patch(mpl_artist)\r\n elif artist['type'] == 'line':\r\n mpl_artist = matplotlib.lines.Line2D([artist['x0'], artist['x1']], [artist['y0'], artist['y1']],\r\n color=artist['linecolor'], linewidth=artist['linewidth'],\r\n linestyle=artist['linestyle'],\r\n solid_joinstyle='miter', solid_capstyle='projecting',\r\n dash_joinstyle='miter', dash_capstyle='projecting')\r\n ax.add_artist(mpl_artist)\r\n elif artist['type'] == 'circle':\r\n pass\r\n elif artist['type'] == 'arrow':\r\n pass\r\n elif artist['type'] == 'symbol':\r\n pass\r\n elif artist['type'] == 'curve':\r\n mpl_artist = matplotlib.lines.Line2D(artist['x'], artist['y'], color=artist['linecolor'],\r\n linewidth=0.5669291338582677, solid_joinstyle='bevel',\r\n solid_capstyle='butt')\r\n ax.add_artist(mpl_artist)\r\n elif artist['type'] == 'event':\r\n ax.text(float(artist['eventTime'][0] - 0.1 * np.diff(ax.get_xlim())),\r\n float(np.mean(artist['y'])),\r\n '{:d} APs'.format(len(artist['eventTime'])),\r\n color=artist['linecolor'], va='center', ha='left')\r\n\r\n for et in artist['eventTime']:\r\n mpl_artist = matplotlib.lines.Line2D([et, et], artist['y'], color=artist['linecolor'],\r\n linewidth=0.5669291338582677, solid_joinstyle='bevel',\r\n solid_capstyle='butt')\r\n ax.add_artist(mpl_artist)\r\n\r\n elif artist['type'] == 'ttl':\r\n pass\r\n else:\r\n pass\r\n\r\n # Add the artist to the plot\r\n #ax.add_artist(mpl_artist)\r\n\r\n\r\n@AdjustAxs()\r\ndef TurnOffAxis(ax):\r\n \"\"\"Turn off all axis\"\"\"\r\n ax.spines['left'].set_visible(False)\r\n ax.spines['right'].set_visible(False)\r\n ax.spines['top'].set_visible(False)\r\n ax.spines['bottom'].set_visible(False)\r\n ax.xaxis.set_visible(False)\r\n ax.yaxis.set_visible(False)\r\n\r\ndef writeEpisodeNote(zData, viewRange, channels, initFunc=None, mode='Simple'):\r\n if initFunc is None:\r\n initFunc = lambda x: x[0]\r\n\r\n ts = zData.Protocol.msPerPoint\r\n notes = []\r\n # Make notes for each channel \r\n for ch in channels:\r\n V = initFunc(spk_window(getattr(zData, 'Voltage')[ch], ts, viewRange))\r\n I = initFunc(spk_window(getattr(zData, 'Current')[ch], ts, viewRange))\r\n notes.append(\"Channel %s %.1f mV %d pA\"%(ch, V , I ))\r\n # notes.append(\"Channel %s %.1f mV %d pA\"%(ch, min(getattr(zData, 'Voltage')[ch]), min(getattr(zData, 'Current')[ch]) ))\r\n if mode.lower() == 'simple' and zData.Protocol.acquireComment != 'PySynapse Arithmetic Data':\r\n final_notes = os.path.basename(os.path.splitext(zData.Protocol.readDataFrom)[0]) + ' ' + ' '.join(notes) + ' WCTime: ' + zData.Protocol.WCtimeStr + ' min'\r\n elif mode.lower() == 'label only':\r\n final_notes = os.path.basename(os.path.splitext(zData.Protocol.readDataFrom)[0])\r\n else: # Full\r\n final_notes = zData.Protocol.readDataFrom + ' ' + ' '.join(notes) + ' WCTime: ' + zData.Protocol.WCtimeStr + ' min'\r\n return final_notes\r\n# %%\r\ndef PlotTraces(df, index, viewRange, saveDir, colorfy=False, artists=None, dpi=300, fig_size=None,\r\n adjustFigH=True, adjustFigW=True, nullRange=None, annotation='Simple', showInitVal=True,\r\n setFont='default', fontSize=10, linewidth=1.0, monoStim=False, stimReflectCurrent=True,\r\n plotStimOnce=False, filterDict=None, **kwargs):\r\n \"\"\"Export multiple traces overlapping each other\"\"\" \r\n # np.savez('R:/tmp.npz', df=df, index=index, viewRange=[viewRange], saveDir=saveDir, colorfy=colorfy)\r\n # return\r\n # set_trace()\r\n # Start the figure\r\n # viewRange= {(channel, stream):[[xmin,xmax],[ymin),ymax]]}\r\n nchannels = len(viewRange.keys())\r\n if not fig_size: # if not specified size, set to (4, 4*nchannels)\r\n fig_size = (4, 4*nchannels)\r\n\r\n if not colorfy:\r\n colorfy=['k']\r\n fig, _ = plt.subplots(nrows=nchannels, ncols=1, sharex=True)\r\n ax = fig.get_axes()\r\n # text annotation area\r\n textbox = []\r\n for n, i in enumerate(index):\r\n zData = df['Data'][i]\r\n ts = zData.Protocol.msPerPoint\r\n channels = []\r\n \r\n for c, m in enumerate(viewRange.keys()):\r\n # Draw plots\r\n X = zData.Time\r\n Y = getattr(zData, m[0])[m[1]]\r\n # null the trace, but ignore arithmetic data since their data were already nulled\r\n if nullRange is not None and zData.Protocol.acquireComment != 'PySynapse Arithmetic Data':\r\n if isinstance(nullRange, list):\r\n Y = Y - np.mean(spk_window(Y, ts, nullRange))\r\n else: # a single number\r\n Y = Y - Y[time2ind(nullRange, ts)]\r\n \r\n # window the plot\r\n X = spk_window(X, ts, viewRange[m][0])\r\n Y = spk_window(Y, ts, viewRange[m][0])\r\n\r\n # Apply filter if toggled filtering, but do not filter stimulus\r\n if isinstance(filterDict, dict) and m[0]!='Stimulus':\r\n Y = butterFilter(Y, filterDict['order'], filterDict['wn'], filterDict['btype'])\r\n\r\n # Stim channel reflects current channel\r\n if stimReflectCurrent and m[0]=='Stimulus':\r\n CurBase = spk_window(zData.Current[m[1]], ts, viewRange[m][0]) # use view range of stimulus on current\r\n CurBase = np.mean(spk_window(CurBase, ts, [0,50]))\r\n Y = Y + CurBase\r\n \r\n # do the plot\r\n if m[0] in ['Voltage', 'Current'] or not monoStim:\r\n ax[c].plot(X, Y, color=colorfy[n%len(colorfy)], lw=linewidth, solid_joinstyle='bevel', solid_capstyle='butt')\r\n else: # Stimulus\r\n if plotStimOnce and n > 0:\r\n pass\r\n else:\r\n ax[c].plot(X, Y, color='k', lw=linewidth, solid_joinstyle='bevel', solid_capstyle='butt')\r\n # Draw initial value\r\n if showInitVal:\r\n InitVal = \"{0:0.0f}\".format(Y[0])\r\n if m[0] == 'Voltage':\r\n InitVal += ' mV'\r\n elif m[0] == 'Current':\r\n InitVal += 'pA'\r\n elif m[0] == 'Stimulus':\r\n if stimReflectCurrent:\r\n InitVal += ' pA'\r\n else:\r\n InitVal = ''\r\n\r\n ax[c].text(X[0]-0.03*(viewRange[m][0][1]-viewRange[m][0][0]), Y[0]-1, InitVal, ha='right', va='center', color=colorfy[n%len(colorfy)])\r\n\r\n if m[1] not in channels:\r\n channels.append(m[1])\r\n\r\n if annotation.lower() != 'none':\r\n final_notes = writeEpisodeNote(zData, viewRange[m][0], channels=channels, mode=annotation)\r\n # Draw more annotations\r\n textbox.append(TextArea(final_notes, minimumdescent=False, textprops=dict(color=colorfy[n%len(colorfy)])))\r\n \r\n # Group all the episode annotation text\r\n if annotation.lower() != 'none':\r\n box = VPacker(children=textbox, align=\"left\", pad=0, sep=2)\r\n annotationbox = AnchoredOffsetbox(loc=3, child=box, frameon=False, bbox_to_anchor=[1, 1.1])\r\n ax[-1].add_artist(annotationbox)\r\n scalebar = [annotationbox]\r\n else:\r\n scalebar = []\r\n\r\n # Draw annotation artists\r\n DrawAnnotationArtists(artists, axs=ax)\r\n\r\n # set axis\r\n for c, vr in enumerate(viewRange.items()):\r\n l, r = vr\r\n ax[c].set_xlim(r[0])\r\n ax[c].set_ylim(r[1])\r\n # Add scalebar\r\n scalebar.append(AddTraceScaleBar(xunit='ms', yunit='mV' if l[0]=='Voltage' else 'pA', ax=ax[c]))\r\n plt.subplots_adjust(hspace=.001)\r\n # temp = 510 + c\r\n temp = tic.MaxNLocator(3)\r\n ax[c].yaxis.set_major_locator(temp)\r\n\r\n if (isinstance(setFont, str) and setFont.lower() in ['default', 'arial', 'helvetica']) or \\\r\n (isinstance(setFont, bool) and setFont):\r\n SetFont(ax, fig, fontsize=fontSize, fontname=os.path.join(__location__,'../resources/Helvetica.ttf'))\r\n else:\r\n SetFont(ax, fig, fontsize=fontSize, fontname=setFont)\r\n\r\n # save the figure\r\n if adjustFigH:\r\n fig_size = (fig_size[0], fig_size[1]*nchannels)\r\n\r\n fig.set_size_inches(fig_size)\r\n\r\n # plt.subplots_adjust(hspace=-0.8)\r\n fig.savefig(saveDir, bbox_inches='tight', bbox_extra_artists=tuple(scalebar), dpi=dpi, transparent=True)\r\n # Close the figure after save\r\n plt.close(fig)\r\n # Convert from svg to eps\r\n if '.svg' in saveDir:\r\n svg2eps_ai(source_file=saveDir, target_file=saveDir.replace('.svg', '.eps'))\r\n\r\n\r\n return(ax)\r\n\r\ndef PlotTracesConcatenated(df, index, viewRange, saveDir, colorfy=False, artists=None, dpi=300,\r\n fig_size=None, nullRange=None, hSpaceType='Fixed', hFixedSpace=0.10,\r\n adjustFigW=True, adjustFigH=True, trimH=(None,None),\r\n annotation='Simple', showInitVal=True, setFont='default', fontSize=10,\r\n linewidth=1.0, monoStim=False, stimReflectCurrent=True, **kwargs):\r\n \"\"\"Export traces arranged horizontally.\r\n Good for an experiments acquired over multiple episodes.\r\n trimH: (t1, t2) trim off the beginning of first episode by t1 seconds, and the\r\n the end of the last episode by t2 seconds\r\n \"\"\"\r\n nchannels = len(viewRange.keys())\r\n if not colorfy:\r\n colorfy=['k']\r\n fig, _= plt.subplots(nrows=nchannels, ncols=1, sharex=True)\r\n ax = fig.get_axes()\r\n # text annotation area\r\n textbox = []\r\n nullBase = dict()\r\n currentTime = 0\r\n maxWindow = max(df['Duration'])\r\n for n, i in enumerate(index): #iterate over episodes\r\n zData = df['Data'][i]\r\n ts = zData.Protocol.msPerPoint\r\n channels = []\r\n for c, m in enumerate(viewRange.keys()): # iterate over channels/streams\r\n # Draw plots\r\n X = zData.Time + currentTime\r\n Y = getattr(zData, m[0])[m[1]]\r\n # null the trace\r\n if nullRange is not None:\r\n if n == 0: # calculate nullBase\r\n if isinstance(nullRange, list):\r\n nullBase[(m[0],m[1])] = np.mean(spk_window(Y, ts, nullRange))\r\n else:\r\n nullBase[(m[0],m[1])] = Y[time2ind(nullRange, ts)]\r\n Y = Y - nullBase[(m[0],m[1])]\r\n if n == 0 and trimH[0] is not None:\r\n X = spk_window(X, ts, (trimH[0], None))\r\n Y = spk_window(X, ts, (trimH[0], None))\r\n elif n + 1 == len(index) and trimH[1] is not None:\r\n X = spk_window(X, ts, (None, trimH[1]))\r\n Y = spk_window(X, ts, (None, trimH[1]))\r\n\r\n # Stim channel reflects current channel\r\n if stimReflectCurrent and m[0]=='Stimulus':\r\n CurBase = spk_window(zData.Current[m[1]], ts, viewRange[m][0]) # use view range of stimulus on current\r\n CurBase = np.mean(spk_window(CurBase, ts, [0,50]))\r\n Y = Y + CurBase\r\n # do the plot\r\n if m[0] in ['Voltage', 'Current'] or not monoStim: # temporary workaround\r\n ax[c].plot(X, Y, color=colorfy[n%len(colorfy)], lw=linewidth, solid_joinstyle='bevel', solid_capstyle='butt')\r\n else:\r\n ax[c].plot(X, Y, color='k', lw=linewidth, solid_joinstyle='bevel', solid_capstyle='butt')\r\n # Draw the initial value, only for the first plot\r\n if n == 0 and showInitVal:\r\n InitVal = \"{0:0.0f}\".format(Y[0])\r\n if m[0] == 'Voltage':\r\n InitVal += ' mV'\r\n elif m[0] == 'Current':\r\n InitVal += ' pA'\r\n elif m[0] == 'Stimulus':\r\n if stimReflectCurrent:\r\n InitVal += ' pA'\r\n else:\r\n InitVal = ''\r\n\r\n ax[c].text(X[0]-0.03*(viewRange[m][0][1]-viewRange[m][0][0]), Y[0]-1, InitVal, ha='right', va='center', color=colorfy[n%len(colorfy)])\r\n\r\n if m[1] not in channels:\r\n channels.append(m[1])\r\n\r\n if annotation.lower() != 'none':\r\n final_notes = writeEpisodeNote(zData, viewRange[m][0], channels=channels, mode=annotation)\r\n # Draw some annotations\r\n textbox.append(TextArea(final_notes, minimumdescent=False, textprops=dict(color=colorfy[n%len(colorfy)])))\r\n\r\n # Set some spacing for the next episode\r\n if n+1 < len(index):\r\n if hSpaceType.lower() == 'fixed':\r\n currentTime = currentTime + (len(Y)-1)*ts + maxWindow * hFixedSpace / 100.0\r\n elif hSpaceType.lower() in ['real time', 'realtime', 'rt']:\r\n currentTime = currentTime + (df['Data'][index[n+1]].Protocol.WCtime - zData.Protocol.WCtime)*1000\r\n\r\n # Group all the episodes annotation text\r\n if annotation.lower() != 'none':\r\n box = VPacker(children=textbox, align=\"left\",pad=0, sep=2)\r\n annotationbox = AnchoredOffsetbox(loc=3, child=box, frameon=False, bbox_to_anchor=[1, 1.1])\r\n ax[-1].add_artist(annotationbox)\r\n scalebar = [annotationbox]\r\n else:\r\n scalebar = []\r\n \r\n # set axis\r\n for c, vr in enumerate(viewRange.items()):\r\n ax[c].set_ylim(vr[1][1])\r\n # Add scalebar\r\n scalebar.append(AddTraceScaleBar(xunit='ms', yunit='mV' if vr[0][0]=='Voltage' else 'pA', ax=ax[c]))\r\n plt.subplots_adjust(hspace = .001)\r\n temp = tic.MaxNLocator(3)\r\n ax[c].yaxis.set_major_locator(temp)\r\n\r\n # Set font\r\n if (isinstance(setFont, str) and setFont.lower() in ['default', 'arial', 'helvetica']) or \\\r\n (isinstance(setFont, bool) and setFont):\r\n SetFont(ax, fig, fontsize=fontSize, fontname=os.path.join(__location__,'../resources/Helvetica.ttf'))\r\n else:\r\n SetFont(ax, fig, fontsize=fontSize, fontname=setFont)\r\n \r\n # figure out and set the figure size\r\n if adjustFigW:\r\n fig_size = (np.ptp(ax[0].get_xlim()) / maxWindow * fig_size[0], fig_size[1])\r\n \r\n if adjustFigH:\r\n fig_size = (fig_size[0], fig_size[1]*nchannels)\r\n \r\n fig.set_size_inches(fig_size)\r\n \r\n fig.savefig(saveDir, bbox_inches='tight', bbox_extra_artists=tuple(scalebar), dpi=dpi)\r\n # Close the figure after save\r\n plt.close(fig)\r\n # Convert svg file to eps\r\n if '.svg' in saveDir:\r\n svg2eps_ai(source_file=saveDir, target_file=saveDir.replace('.svg', '.eps'))\r\n \r\n return(ax)\r\n \r\n \r\ndef PlotTracesAsGrids(df, index, viewRange, saveDir=None, colorfy=False, artists=None, dpi=300,\r\n fig_size=None, adjustFigH=True, adjustFigW=True, nullRange=None, \r\n annotation='Simple', setFont='default',gridSpec='Vertical', showInitVal=True,\r\n scalebarAt='all', fontSize=10, linewidth=1.0, monoStim=False,\r\n stimReflectCurrent=True, plotStimOnce=False, **kwargs):\r\n \"Export Multiple episodes arranged in a grid; default vertically\"\"\" \r\n if not colorfy:\r\n colorfy = ['k']\r\n \r\n nchannels = len(viewRange.keys())\r\n nepisodes = len(index)\r\n if isinstance(gridSpec, str):\r\n nrows, ncols = {\r\n 'ver': (nchannels*nepisodes, 1),\r\n 'hor': (1, nchannels*nepisodes),\r\n 'cha': (nchannels, nepisodes),\r\n 'epi': (nepisodes, nchannels)\r\n }.get(gridSpec[:3].lower(), (None, None))\r\n \r\n if nrows is None:\r\n raise(ValueError('Unrecognized gridSpec: {}'.format(gridSpec)))\r\n else:\r\n raise(TypeError('Unrecognized type of argument: \"gridSpec\"'))\r\n \r\n fig, _ = plt.subplots(nrows=nrows, ncols=ncols, sharex=True)\r\n ax = fig.get_axes()\r\n \r\n # text annotation area\r\n textbox = []\r\n viewRange_dict = {}\r\n row, col = 0,0 # keep track of axis used\r\n first_last_mat = [[],[]]\r\n for n, i in enumerate(index):\r\n zData = df['Data'][i]\r\n ts = zData.Protocol.msPerPoint\r\n channels = []\r\n \r\n for c, m in enumerate(viewRange.keys()):\r\n # Draw plots\r\n X = zData.Time\r\n Y = getattr(zData, m[0])[m[1]]\r\n # null the trace\r\n if nullRange is not None:\r\n if isinstance(nullRange, list):\r\n Y = Y - np.mean(spk_window(Y, ts, nullRange))\r\n else: # a single number\r\n Y = Y - Y[time2ind(nullRange, ts)]\r\n # window the plot\r\n X = spk_window(X, ts, viewRange[m][0])\r\n Y = spk_window(Y, ts, viewRange[m][0])\r\n # Stim channel reflects current channel\r\n if stimReflectCurrent and m[0]=='Stimulus':\r\n CurBase = spk_window(zData.Current[m[1]], ts, viewRange[m][0]) # use view range of stimulus on current\r\n CurBase = np.mean(spk_window(CurBase, ts, [0,50]))\r\n Y = Y + CurBase\r\n # do the plot\r\n ind = np.ravel_multi_index((row,col), (nrows, ncols), order='C')\r\n if n == 0:\r\n first_last_mat[0].append(ind)\r\n elif n == len(index)-1:\r\n first_last_mat[-1].append(ind)\r\n \r\n if m[0] in ['Voltage', 'Current'] or not monoStim:\r\n ax[ind].plot(X, Y, color=colorfy[n%len(colorfy)], lw=linewidth, solid_joinstyle='bevel', solid_capstyle='butt')\r\n else: # Stimulus\r\n if plotStimOnce and n > 0:\r\n pass\r\n else:\r\n ax[ind].plot(X, Y, color='k', lw=linewidth, solid_joinstyle='bevel', solid_capstyle='butt')\r\n # View range\r\n viewRange_dict[(row,col)] = list(m)+list(viewRange[m])\r\n # Draw initial value\r\n if showInitVal:\r\n InitVal = \"{0:0.0f}\".format(Y[0])\r\n if m[0] == 'Voltage':\r\n InitVal += ' mV'\r\n elif m[0] == 'Current':\r\n InitVal += ' pA'\r\n elif m[0] == 'Stimulus':\r\n if stimReflectCurrent:\r\n InitVal += ' pA'\r\n else:\r\n InitVal = ''\r\n\r\n ax[ind].text(X[0]-0.03*(viewRange[m][0][1]-viewRange[m][0][0]), Y[0]-1, InitVal, ha='right', va='center', color=colorfy[n%len(colorfy)])\r\n\r\n if m[1] not in channels:\r\n channels.append(m[1])\r\n \r\n # update axis\r\n row, col = {\r\n 'ver': (row+1, col),\r\n 'hor': (row, col+1),\r\n 'cha': (row+1 if c<nrows-1 else 0, col+1 if c==nrows-1 else col),\r\n 'epi': (row+1 if c==ncols-1 else row, col+1 if c<ncols-1 else 0)\r\n }.get(gridSpec[:3].lower())\r\n \r\n if annotation.lower() != 'none':\r\n final_notes = writeEpisodeNote(zData, viewRange[m][0], channels=channels, mode=annotation)\r\n # Draw more annotations\r\n textbox.append(TextArea(final_notes, minimumdescent=False, textprops=dict(color=colorfy[n%len(colorfy)])))\r\n \r\n # Group all the episode annotation text\r\n if annotation.lower() != 'none':\r\n box = VPacker(children=textbox, align=\"left\",pad=0, sep=2)\r\n annotationbox = AnchoredOffsetbox(loc=3, child=box, frameon=False, bbox_to_anchor=[1, 1.1])\r\n ax[-1].add_artist(annotationbox)\r\n scalebar = [annotationbox]\r\n else:\r\n scalebar = []\r\n\r\n # set axis\r\n for c, vr in enumerate(viewRange_dict.items()):\r\n l, r = vr\r\n ind = np.ravel_multi_index(l, (nrows, ncols), order='C')\r\n ax[ind].set_xlim(r[2])\r\n ax[ind].set_ylim(r[3])\r\n # Add scalebar\r\n if scalebarAt.lower()=='all' or (scalebarAt.lower()=='first' and ind in first_last_mat[0]) or (scalebarAt.lower()=='last' and ind in first_last_mat[-1]):\r\n scalebar.append(AddTraceScaleBar(xunit='ms', yunit='mV' if r[0]=='Voltage' else 'pA', ax=ax[ind]))\r\n else: # including 'none'\r\n TurnOffAxis(ax=ax[ind])\r\n \r\n plt.subplots_adjust(hspace = .001)\r\n # temp = 510 + c\r\n temp = tic.MaxNLocator(3)\r\n ax[ind].yaxis.set_major_locator(temp)\r\n\r\n # Draw annotation artist for each export\r\n DrawAnnotationArtists(artists, axs=[ax[ind]])\r\n \r\n if (isinstance(setFont, str) and setFont.lower() in ['default', 'arial', 'helvetica']) or \\\r\n (isinstance(setFont, bool) and setFont):\r\n SetFont(ax, fig, fontsize=fontSize, fontname=os.path.join(__location__,'../resources/Helvetica.ttf'))\r\n else:\r\n SetFont(ax, fig, fontsize=fontSize, fontname=setFont)\r\n\r\n # save the figure\r\n if adjustFigW:\r\n fig_size = (fig_size[0]*ncols, fig_size[1])\r\n if adjustFigH:\r\n fig_size = (fig_size[0], fig_size[1]*nrows)\r\n fig.set_size_inches(fig_size)\r\n \r\n # plt.subplots_adjust(hspace=-0.8)\r\n fig.savefig(saveDir, bbox_inches='tight', bbox_extra_artists=tuple(scalebar), dpi=dpi, transparent=True)\r\n # Close the figure after save\r\n plt.close(fig)\r\n if '.svg' in saveDir:\r\n svg2eps_ai(source_file=saveDir, target_file=saveDir.replace('.svg', '.eps'))\r\n\r\n return(ax)\r\n \r\n\r\ndef data2csv(data):\r\n return\r\n\r\ndef embedMetaData(ax):\r\n \"\"\"embedding meta data to a figure\"\"\"\r\n return\r\n\r\n\r\n\r\nif __name__ == '__main__':\r\n sys.path.append(\"D:/Edward/Documents/Assignments/Scripts/Python/PySynapse\")\r\n#\r\n# data = np.load('R:/tmp.npz')\r\n# df, index, viewRange, saveDir, colorfy = data['df'].tolist(), data['index'].tolist(), data['viewRange'][0],\\\r\n# data['saveDir'].tolist(), data['colorfy'].tolist()\r\n# # plot the figure\r\n# ax= PlotTraces(df=df, index=index, viewRange=viewRange, saveDir='R:/tmp.eps', colorfy=colorfy, setFont=True)\r\n nrows, ncols = 5,2\r\n row, col = 0, 0\r\n for n in np.arange(0,2):\r\n for c in np.arange(0, 5):\r\n print((row, col))\r\n row, col = {\r\n 'ver': (row+1, col),\r\n 'hor': (row, col+1),\r\n 'cha': (row+1 if c<nrows-1 else 0, col+1 if c==nrows-1 else col),\r\n 'epi': (row+1 if c==ncols-1 else row, col+1 if c<ncols-1 else 0)\r\n }.get('cha')\r\n"
},
{
"alpha_fraction": 0.6635679602622986,
"alphanum_fraction": 0.6693227291107178,
"avg_line_length": 37.94827651977539,
"blob_id": "6d9f472b9b852a27a7cc7153d936d9831b3e43de",
"content_id": "bae2e19ab94906603d6f413550eddfd0089d2643",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Markdown",
"length_bytes": 2259,
"license_type": "no_license",
"max_line_length": 179,
"num_lines": 58,
"path": "/Spikes/spikedetekt2/dev/api.md",
"repo_name": "sapphire008/Python",
"src_encoding": "UTF-8",
"text": "* spikedetekt2 can be used in two ways:\n\n * It offers a single command that accepts PRM/PRB files/dictionaries and performs the whole spike detection (in the future, it might also do clustering).\n\n * It offers a Python API to customize the whole process.\n\n* Method 1 is for regular users, method 2 is for advanced users and especially for ourselves. It will make things simpler when we'll need to try different algorithms or workflows.\n\n* The Python API in method 2 implements functions that simplify the way we read and write the data.\n\n### Get a reader for the raw data\n\n* Convention: by default, times are given in samples rather than seconds.\n\n* The method `rd.to_seconds()` accepts a number, a tuple, a list, or an array with samples, \n and returns the same in seconds.\n\n rd = RawDataReader('rawdata.ns5', nchannels=?) # from a binary file\n rd = RawDataReader('experiment.kwik') # from a kwik file\n rd = RawDataReader(rawdata) # from a NumPy Nsamples x Nchannels array\n rd = RawDataReader(..., chunk_size=[in samples],\n chunk_overlap=[in samples], \n sample_rate=[in Hz])\n \n### Get a chunk of data\n\n chunk = rd.next_chunk()\n chunk.window_full == (s1, s2) # with overlap\n chunk.window_keep == (s1, s2) # without overlap\n chunk.data_chunk_full # chunk_full_size x Nchannels array\n chunk.data_chunk_keep # chunk_full_size x Nchannels array\n\n rd.reset() # reset chunking and move the cursor to the beginning\n \n### Get parameters\n\nGet user parameters or default parameters if unspecified.\n\n params = get_params('myparams.prm') # from a PRM file\n params = get_params(param1=?, ...) # directly\n param1 = params['param1']\n\n### Create experiment files\n\n create_files('myexperiment', prm=prm, prb=prb)\n files = open_files('myexperiment', mode='a')\n \n### Adding data to an experiment\n\n with Experiment('myexperiment', mode='a') as exp:\n # Append high-pass filtered data to the experiment.\n exp.recordings[0].high.data.append(filtered)\n\n # Append a spike.\n exp.spikes.add(time_samples=..., ...)\n \n # Update waveforms of certain spikes.\n exp.spikes.waveforms[indices, ...] = waveforms\n"
},
{
"alpha_fraction": 0.6284646391868591,
"alphanum_fraction": 0.6368441581726074,
"avg_line_length": 41.813560485839844,
"blob_id": "ce4882004c8293a747c9b895b76e6de0d419e797",
"content_id": "ae993a1c5ac5dfdd1e00c0bd40a3f17fb4cbe3c0",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 7757,
"license_type": "no_license",
"max_line_length": 115,
"num_lines": 177,
"path": "/PySynapse/resources/ui_designer/SynapseQt_old.py",
"repo_name": "sapphire008/Python",
"src_encoding": "UTF-8",
"text": "# -*- coding: utf-8 -*-\r\n\r\n# Form implementation generated from reading ui file 'SynapseQt.ui'\r\n#\r\n# Created: Sat Apr 18 19:44:35 2015\r\n# by: PyQt4 UI code generator 4.10.4\r\n#\r\n# WARNING! All changes made in this file will be lost!\r\n\r\nfrom PyQt4 import QtCore, QtGui\r\nimport os, glob\r\nimport numpy as np\r\n\r\ntry:\r\n _fromUtf8 = QtCore.QString.fromUtf8\r\nexcept AttributeError:\r\n def _fromUtf8(s):\r\n return s\r\n\r\ntry:\r\n _encoding = QtGui.QApplication.UnicodeUTF8\r\n def _translate(context, text, disambig):\r\n return QtGui.QApplication.translate(context, text, disambig, _encoding)\r\nexcept AttributeError:\r\n def _translate(context, text, disambig):\r\n return QtGui.QApplication.translate(context, text, disambig)\r\n\r\nclass Ui_MainWindow(QtGui.QMainWindow):\r\n def __init__(self, parent=None):\r\n super(Ui_MainWindow, self).__init__(parent)\r\n self.setupUi(self)\r\n \r\n def setupUi(self, MainWindow):\r\n # Set up basic layout of the main window\r\n MainWindow.setObjectName(_fromUtf8(\"MainWindow\"))\r\n MainWindow.resize(800, 275)\r\n self.centralwidget = QtGui.QWidget(MainWindow)\r\n self.centralwidget.setObjectName(_fromUtf8(\"centralwidget\"))\r\n self.horizontalLayout = QtGui.QHBoxLayout(self.centralwidget)\r\n self.horizontalLayout.setObjectName(_fromUtf8(\"horizontalLayout\"))\r\n \r\n # Set splitter for two panels\r\n self.splitter = QtGui.QSplitter(self.centralwidget)\r\n sizePolicy = QtGui.QSizePolicy(QtGui.QSizePolicy.Expanding, QtGui.QSizePolicy.Preferred)\r\n sizePolicy.setHorizontalStretch(0)\r\n sizePolicy.setVerticalStretch(0)\r\n sizePolicy.setHeightForWidth(self.splitter.sizePolicy().hasHeightForWidth())\r\n self.splitter.setSizePolicy(sizePolicy)\r\n self.splitter.setOrientation(QtCore.Qt.Horizontal)\r\n self.splitter.setObjectName(_fromUtf8(\"splitter\"))\r\n \r\n # Set up data browser tree view\r\n self.setDataBrowser_treeview()\r\n \r\n # Set up Episode list table view\r\n self.setEpisodeList_tableview()\r\n \r\n self.horizontalLayout.addWidget(self.splitter)\r\n MainWindow.setCentralWidget(self.centralwidget)\r\n \r\n # Set up menu bar\r\n self.menubar = QtGui.QMenuBar(MainWindow)\r\n self.menubar.setGeometry(QtCore.QRect(0, 0, 800, 21))\r\n self.menubar.setObjectName(_fromUtf8(\"menubar\"))\r\n MainWindow.setMenuBar(self.menubar)\r\n \r\n # Set up status bar\r\n self.statusbar = QtGui.QStatusBar(MainWindow)\r\n self.statusbar.setObjectName(_fromUtf8(\"statusbar\"))\r\n MainWindow.setStatusBar(self.statusbar)\r\n \r\n # Execution\r\n self.retranslateUi(MainWindow)\r\n QtCore.QMetaObject.connectSlotsByName(MainWindow)\r\n \r\n # ---------------- Data browser behaviors ---------------------------------\r\n def setDataBrowser_treeview(self):\r\n self.treeview = QtGui.QTreeView(self.splitter)\r\n sizePolicy = QtGui.QSizePolicy(QtGui.QSizePolicy.Preferred, QtGui.QSizePolicy.Expanding)\r\n sizePolicy.setHorizontalStretch(1)\r\n sizePolicy.setVerticalStretch(0)\r\n sizePolicy.setHeightForWidth(self.treeview.sizePolicy().hasHeightForWidth())\r\n self.treeview.setSizePolicy(sizePolicy)\r\n #self.treeview.setSizeAdjustPolicy(QtGui.QAbstractScrollArea.AdjustToContents)\r\n #self.treeview.setTextElideMode(QtCore.Qt.ElideNone)\r\n self.treeview.setObjectName(_fromUtf8(\"treeview\"))\r\n # Set file system as model of the tree view\r\n self.treeview.model = QtGui.QFileSystemModel()\r\n self.treeview.model.setRootPath( QtCore.QDir.currentPath() )\r\n self.treeview.setModel(self.treeview.model)\r\n # Hide columns in file system model\r\n for x in range(0, self.treeview.model.columnCount()):\r\n self.treeview.hideColumn(x+1)\r\n #self.treeview.setColumnWidth(0, 200)\r\n # Set behavior upon clicked\r\n self.treeview.clicked.connect(self.on_sequence_clicked)\r\n # Set behavior upon expanded\r\n self.treeview.expanded.connect(self.on_treeview_expanded)\r\n \r\n @QtCore.pyqtSlot(QtCore.QModelIndex)\r\n def on_treeview_expanded(self, index):\r\n \"\"\"Return file path and file name upon tree expansion\"\"\"\r\n indexItem = self.treeview.model.index(index.row(), 0, index.parent())\r\n # path or filename selected\r\n self.current_fileName = self.treeview.model.fileName(indexItem)\r\n # full path/filename selected\r\n self.current_filePath = self.treeview.model.filePath(indexItem)\r\n if os.path.isdir(self.current_filePath):\r\n # list desired files / sequences; modify display if found targets\r\n self.file_sequence_list(self.current_filePath)\r\n else: # clicked on the replaced item object\r\n # call Sequence listing tree viewer\r\n S = SequenceListingTree(self.current_fileName, self.available_files[self.available_indices==indexItem])\r\n S.show()\r\n \r\n @QtCore.pyqtSlot(QtCore.QModelIndex)\r\n def on_sequence_clicked(self, index):\r\n \"\"\" Display a list of episodes upon sequence clicked\"\"\"\r\n indexItem = self.treeview.model.index(index.row(), 0, index.parent())\r\n # Check if the item clicked is sequence instead of a folder / file\r\n \r\n def file_sequence_list(self, P, delimiter='.', ext='*.dat'):\r\n \"\"\"List files and extract common names as sequence\r\n P is the full path that contains the file sequence\r\n \"\"\"\r\n P = P.encode('ascii','ignore')\r\n # Make sure only files, not folders are used\r\n self.available_files = glob.glob(os.path.join(P,ext))\r\n if not self.available_files:\r\n return\r\n self.available_files.sort() # sort lexicologically\r\n # Get sequence\r\n self.available_sequences = [os.path.basename(f).split(delimiter)[0] for f in self.available_files]\r\n # get indices of list\r\n self.available_sequences, self.available_indices = np.unique(self.available_sequences, return_inverse=True)\r\n \r\n def modify_treeview_model(self, Sequence):\r\n self.model = None\r\n \r\n # --------------- Episode list behaviors -------------------------------------- \r\n def setEpisodeList_tableview(self):\r\n self.tableview = QtGui.QTableView(self.splitter)\r\n sizePolicy = QtGui.QSizePolicy(QtGui.QSizePolicy.Expanding, QtGui.QSizePolicy.Expanding)\r\n sizePolicy.setHorizontalStretch(3)\r\n sizePolicy.setVerticalStretch(0)\r\n sizePolicy.setHeightForWidth(self.tableview.sizePolicy().hasHeightForWidth())\r\n self.tableview.setSizePolicy(sizePolicy)\r\n self.tableview.setObjectName(_fromUtf8(\"tableview\"))\r\n self.horizontalLayout.addWidget(self.splitter)\r\n \r\n # Set behavior upon selection\r\n self.tableview.clicked.connect(self.on_sequence_clicked) \r\n \r\n # --------------- Misc -------------------------------------------------------\r\n def retranslateUi(self, MainWindow):\r\n MainWindow.setWindowTitle(_translate(\"MainWindow\", \"Syanpse\", None))\r\n \r\n# Objects\r\n# Listing episodes from a sequence\r\nclass SequenceListingTree(QtGui.QApplication):\r\n def __init__(self, parent=None):\r\n \"\"\"Initialize a tree view for sequence browser\"\"\"\r\n super(SequenceListingTree, self).__init__(parent)\r\n\r\n \r\n \r\n# --------------- Test --------------------------------------------\r\n\r\nP = 'X:\\\\Edward\\Data\\\\Traces\\\\Data 14 April 2015'\r\n\r\n\r\nif __name__ == '__main__':\r\n import sys\r\n app = QtGui.QApplication(sys.argv)\r\n w = Ui_MainWindow()\r\n w.show()\r\n #sys.exit(app.exec_())\r\n\r\n"
},
{
"alpha_fraction": 0.4282238483428955,
"alphanum_fraction": 0.45985400676727295,
"avg_line_length": 21.941177368164062,
"blob_id": "7ad52016df25b329027991c44b3046570a766ddd",
"content_id": "66b3e5dc6dd7dd99dca618d93586950e6a18b136",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 411,
"license_type": "no_license",
"max_line_length": 57,
"num_lines": 17,
"path": "/PySynapse/util/cloudconvert_img.py",
"repo_name": "sapphire008/Python",
"src_encoding": "UTF-8",
"text": "# -*- coding: utf-8 -*-\r\n\"\"\"\r\nCreated on Wed Dec 13 17:46:03 2017\r\n\r\n@author: Edward\r\n\"\"\"\r\n\r\nimport cloudconvert\r\napi = cloudconvert.Api('5PGyLT7eAn0yLbnBU3G-7j1JLFWTfcnFUk6x7k_lhuwzioGwqO7bVQ-lJNunsDkrr9fL1JDdjdVog6iDZ31yIw')\r\nprocess = api.convert({\"input\": \"upload\",\r\n \"file\": open('R:/temp.svg', 'rb'),\r\n \"inputformat\": \"svg\",\r\n \"outputformat\": \"eps\",\r\n })\r\n\r\nprocess.wait()\r\nprocess.download()\r\n\r\n\r\n"
},
{
"alpha_fraction": 0.5702335238456726,
"alphanum_fraction": 0.5791932344436646,
"avg_line_length": 38.447235107421875,
"blob_id": "f17836c3a387cca215bc6bf5437b69765ff4d540",
"content_id": "0cf89125c81e528792fe42b231ad8206cd02c48c",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 23550,
"license_type": "no_license",
"max_line_length": 100,
"num_lines": 597,
"path": "/Spikes/spikedetekt2/spikedetekt2/dataio/kwik.py",
"repo_name": "sapphire008/Python",
"src_encoding": "UTF-8",
"text": "\"\"\"This module provides functions used to write HDF5 files in the new file\nformat.\"\"\"\n\n# -----------------------------------------------------------------------------\n# Imports\n# -----------------------------------------------------------------------------\nimport json\nimport os\nimport warnings\nfrom collections import OrderedDict, Iterable\n\nimport numpy as np\nimport tables as tb\n\nfrom utils import convert_dtype, ensure_vector\nfrom spikedetekt2.utils.six import itervalues, iteritems, string_types\nfrom spikedetekt2.utils import warn, debug, COLORS_COUNT\n\n# Disable PyTables' NaturalNameWarning due to nodes which have names starting \n# with an integer.\nwarnings.simplefilter('ignore', tb.NaturalNameWarning)\n\n\n# -----------------------------------------------------------------------------\n# File names\n# -----------------------------------------------------------------------------\nRAW_TYPES = ('raw.kwd', 'high.kwd', 'low.kwd')\nFILE_TYPES = ('kwik', 'kwx') + RAW_TYPES\n\ndef get_filenames(name, dir=None):\n \"\"\"Generate a list of filenames for the different files in a given \n experiment, which name is given.\"\"\"\n # Default directory: working directory.\n if dir is None:\n dir = os.getcwd()\n name = os.path.splitext(name)[0]\n return {type: os.path.join(dir, name + '.' + type) for type in FILE_TYPES}\n \ndef get_basename(path):\n bn = os.path.basename(path)\n bn = os.path.splitext(bn)[0]\n if bn.split('.')[-1] in ('raw', 'high', 'low'):\n return os.path.splitext(bn)[0]\n else:\n return bn\n\n\n# -----------------------------------------------------------------------------\n# Opening/closing functions\n# -----------------------------------------------------------------------------\ndef open_file(path, mode=None):\n if mode is None:\n mode = 'r'\n try:\n f = tb.openFile(path, mode)\n return f\n except IOError as e:\n warn(\"IOError: \" + str(e.message))\n return\n\ndef open_files(name, dir=None, mode=None):\n filenames = get_filenames(name, dir=dir)\n return {type: open_file(filename, mode=mode) \n for type, filename in iteritems(filenames)}\n\ndef close_files(name, dir=None):\n if isinstance(name, string_types):\n filenames = get_filenames(name, dir=dir)\n files = [open_file(filename) for filename in itervalues(filenames)]\n else:\n files = itervalues(name)\n [file.close() for file in files if file is not None]\n \ndef files_exist(name, dir=None):\n files = get_filenames(name, dir=dir)\n return os.path.exists(files['kwik'])\n \ndef delete_files(name, dir=None):\n files = get_filenames(name, dir=dir)\n [os.remove(path) for path in itervalues(files) if os.path.exists(path)]\n \ndef get_row_shape(arr, nrows=1):\n \"\"\"Return the shape of a row of an array.\"\"\"\n return (nrows,) + arr.shape[1:]\n \ndef empty_row(arr, dtype=None, nrows=1):\n \"\"\"Create an empty row for a given array.\"\"\"\n return np.zeros(get_row_shape(arr, nrows=nrows), dtype=arr.dtype)\n \n\n# -----------------------------------------------------------------------------\n# HDF5 file creation\n# -----------------------------------------------------------------------------\ndef create_kwik(path, experiment_name=None, prm=None, prb=None):\n \"\"\"Create a KWIK file.\n \n Arguments:\n * path: path to the .kwik file.\n * experiment_name\n * prm: a dictionary representing the contents of the PRM file (used for\n SpikeDetekt)\n * prb: a dictionary with the contents of the PRB file\n \n \"\"\"\n if experiment_name is None:\n experiment_name = ''\n if prm is None:\n prm = {}\n if prb is None:\n prb = {}\n \n file = tb.openFile(path, mode='w')\n \n file.root._f_setAttr('kwik_version', 2)\n file.root._f_setAttr('name', experiment_name)\n\n file.createGroup('/', 'application_data')\n \n # Set the SpikeDetekt parameters\n file.createGroup('/application_data', 'spikedetekt')\n for prm_name, prm_value in iteritems(prm):\n file.root.application_data.spikedetekt._f_setAttr(prm_name, prm_value)\n \n file.createGroup('/', 'user_data')\n \n # Create channel groups.\n file.createGroup('/', 'channel_groups')\n for igroup, group_info in enumerate(prb.get('channel_groups', [])):\n group = file.createGroup('/channel_groups', str(igroup))\n # group_info: channel, graph, geometry\n group._f_setAttr('name', 'channel_group_{0:d}'.format(igroup))\n group._f_setAttr('adjacency_graph', group_info.get('graph', np.zeros((0, 2))))\n file.createGroup(group, 'application_data')\n file.createGroup(group, 'user_data')\n \n # Create channels.\n file.createGroup(group, 'channels')\n channels = group_info.get('channels', [])\n for channel_idx in channels:\n # channel is the absolute channel index.\n channel = file.createGroup(group.channels, str(channel_idx))\n channel._f_setAttr('name', 'channel_{0:d}'.format(channel_idx))\n \n ############### TODO\n channel._f_setAttr('kwd_index', 0)\n channel._f_setAttr('ignored', False)\n channel._f_setAttr('position', group_info.get('geometry', {}). \\\n get(channel_idx, None))\n channel._f_setAttr('voltage_gain', 0.)\n channel._f_setAttr('display_threshold', 0.)\n file.createGroup(channel, 'application_data')\n file.createGroup(channel.application_data, 'spikedetekt')\n file.createGroup(channel.application_data, 'klustaviewa')\n file.createGroup(channel, 'user_data')\n \n # Create spikes.\n spikes = file.createGroup(group, 'spikes')\n file.createEArray(spikes, 'time_samples', tb.UInt64Atom(), (0,),\n expectedrows=1000000)\n file.createEArray(spikes, 'time_fractional', tb.UInt8Atom(), (0,),\n expectedrows=1000000)\n file.createEArray(spikes, 'recording', tb.UInt16Atom(), (0,),\n expectedrows=1000000)\n clusters = file.createGroup(spikes, 'clusters')\n file.createEArray(clusters, 'main', tb.UInt32Atom(), (0,),\n expectedrows=1000000)\n file.createEArray(clusters, 'original', tb.UInt32Atom(), (0,),\n expectedrows=1000000)\n \n fm = file.createGroup(spikes, 'features_masks')\n fm._f_setAttr('hdf5_path', '{{kwx}}/channel_groups/{0:d}/features_masks'. \\\n format(igroup))\n wr = file.createGroup(spikes, 'waveforms_raw')\n wr._f_setAttr('hdf5_path', '{{kwx}}/channel_groups/{0:d}/waveforms_raw'. \\\n format(igroup))\n wf = file.createGroup(spikes, 'waveforms_filtered')\n wf._f_setAttr('hdf5_path', '{{kwx}}/channel_groups/{0:d}/waveforms_filtered'. \\\n format(igroup))\n \n # TODO: add clusters 0, 1, 2, 3 by default\n \n # Create clusters.\n clusters = file.createGroup(group, 'clusters')\n file.createGroup(clusters, 'main')\n file.createGroup(clusters, 'original')\n \n # Create cluster groups.\n cluster_groups = file.createGroup(group, 'cluster_groups')\n file.createGroup(cluster_groups, 'main')\n file.createGroup(cluster_groups, 'original')\n \n # Create recordings.\n file.createGroup('/', 'recordings')\n \n # Create event types.\n file.createGroup('/', 'event_types')\n \n file.close()\n\ndef create_kwx(path, prb=None, prm=None, has_masks=True):\n \"\"\"Create an empty KWX file.\n \n Arguments:\n * prb: the PRB dictionary\n * waveforms_nsamples (common to all channel groups if set)\n * nfeatures (total number of features per spike, common to all channel groups if set)\n * nchannels (number of channels per channel group, common to all channel groups if set)\n \n \"\"\"\n \n if prb is None:\n prb = {}\n if prm is None:\n prm = {}\n \n nchannels = prm.get('nchannels', None)\n nfeatures_per_channel = prm.get('nfeatures_per_channel', None)\n nfeatures = prm.get('nfeatures', None)\n waveforms_nsamples = prm.get('waveforms_nsamples', None)\n \n file = tb.openFile(path, mode='w')\n file.createGroup('/', 'channel_groups')\n \n for ichannel_group, chgrp_info in enumerate(prb.get('channel_groups', [])):\n nchannels_ = len(chgrp_info.get('channels', [])) or nchannels or 0\n waveforms_nsamples_ = chgrp_info.get('waveforms_nsamples', waveforms_nsamples) or 0\n nfeatures_per_channel_ = chgrp_info.get('nfeatures_per_channel', nfeatures_per_channel) or 0\n nfeatures_ = chgrp_info.get('nfeatures', nfeatures) or nfeatures_per_channel_ * nchannels_\n \n assert nchannels_ > 0\n assert nfeatures_ > 0\n assert waveforms_nsamples_ > 0\n \n channel_group_path = '/channel_groups/{0:d}'.format(ichannel_group)\n \n # Create the HDF5 group for each channel group.\n file.createGroup('/channel_groups', \n '{0:d}'.format(ichannel_group))\n \n \n # Determine a sensible chunk shape.\n chunkrows = 10485760 // (nfeatures_ * 4)\n \n # Create the arrays.\n if has_masks:\n # Features + masks.\n file.createEArray(channel_group_path, 'features_masks',\n tb.Float32Atom(), (0, nfeatures_, 2),\n chunkshape=(chunkrows, nfeatures_, 2))\n else:\n file.createEArray(channel_group_path, 'features_masks',\n tb.Float32Atom(), (0, nfeatures_),\n chunkshape=(chunkrows, nfeatures_))\n \n \n # Determine a sensible chunk shape.\n chunkrows = 10485760 // (waveforms_nsamples_ * nchannels_ * 2)\n \n file.createEArray(channel_group_path, 'waveforms_raw',\n tb.Int16Atom(), (0, waveforms_nsamples_, nchannels_),\n chunkshape=(chunkrows, waveforms_nsamples_, nchannels_))\n file.createEArray(channel_group_path, 'waveforms_filtered',\n tb.Int16Atom(), (0, waveforms_nsamples_, nchannels_),\n chunkshape=(chunkrows, waveforms_nsamples_, nchannels_))\n \n file.close()\n \ndef create_kwd(path, type='raw', prm=None,):#recordings=None,):\n \"\"\"Create an empty KWD file.\n \n Arguments:\n * type: 'raw', 'high', or 'low'\n \n \"\"\"\n \n if prm is None:\n prm = {}\n \n file = tb.openFile(path, mode='w')\n file.createGroup('/', 'recordings')\n \n file.close()\n\ndef create_files(name, dir=None, prm=None, prb=None):\n \n filenames = get_filenames(name, dir=dir)\n \n create_kwik(filenames['kwik'], prm=prm, prb=prb)\n create_kwx(filenames['kwx'], prb=prb, prm=prm)\n \n create_kwd(filenames['raw.kwd'], 'raw', prm=prm)\n create_kwd(filenames['high.kwd'], 'high', prm=prm)\n create_kwd(filenames['low.kwd'], 'low', prm=prm)\n \n return filenames\n\n \n# -----------------------------------------------------------------------------\n# Adding items in the files\n# -----------------------------------------------------------------------------\ndef add_recording(fd, id=None, name=None, sample_rate=None, start_time=None, \n start_sample=None, bit_depth=None, band_high=None,\n band_low=None, downsample_factor=1., nchannels=None,\n nsamples=None, data=None):\n \"\"\"fd is returned by `open_files`: it is a dict {type: tb_file_handle}.\"\"\"\n kwik = fd.get('kwik', None)\n \n if data is not None:\n nsamples, nchannels = data.shape\n \n assert nchannels\n \n # The KWIK needs to be there.\n assert kwik is not None\n if id is None:\n # If id is None, take the maximum integer index among the existing\n # recording names, + 1.\n recordings = sorted([n._v_name \n for n in kwik.listNodes('/recordings')])\n if recordings:\n id = str(max([int(r) for r in recordings if r.isdigit()]) + 1)\n else:\n id = '0'\n # Default name: recording_X if X is an integer, or the id.\n if name is None:\n if id.isdigit():\n name = 'recording_{0:s}'.format(id)\n else:\n name = id\n recording = kwik.createGroup('/recordings', id)\n recording._f_setAttr('name', name)\n recording._f_setAttr('start_time', start_time)\n recording._f_setAttr('start_sample', start_sample)\n recording._f_setAttr('sample_rate', sample_rate)\n recording._f_setAttr('bit_depth', bit_depth)\n recording._f_setAttr('band_high', band_high)\n recording._f_setAttr('band_low', band_low)\n \n kwik_raw = kwik.createGroup('/recordings/' + id, 'raw')\n kwik_high = kwik.createGroup('/recordings/' + id, 'high')\n kwik_low = kwik.createGroup('/recordings/' + id, 'low')\n \n kwik_raw._f_setAttr('hdf5_path', '{raw.kwd}/recordings/' + id)\n kwik_high._f_setAttr('hdf5_path', '{high.kwd}/recordings/' + id)\n kwik_low._f_setAttr('hdf5_path', '{low.kwd}/recordings/' + id)\n \n kwik.createGroup('/recordings/' + id, 'user_data')\n \n for type in RAW_TYPES:\n kwd = fd.get(type, None)\n if kwd:\n add_recording_in_kwd(kwd, recording_id=id,\n downsample_factor=downsample_factor,\n nchannels=nchannels, \n nsamples=nsamples, \n data=data)\n \ndef add_recording_in_kwd(kwd, recording_id=0,\n downsample_factor=None, nchannels=None, \n nsamples=None, data=None):\n if isinstance(kwd, string_types):\n kwd = open_file(kwd, 'a')\n to_close = True\n else:\n to_close = False\n \n if data is not None:\n nsamples, nchannels = data.shape\n \n recording = kwd.createGroup('/recordings', str(recording_id))\n recording._f_setAttr('downsample_factor', downsample_factor)\n \n dataset = kwd.createEArray(recording, 'data', \n tb.Int16Atom(), \n (0, nchannels), expectedrows=nsamples)\n \n # Add raw data.\n if data is not None:\n assert data.shape[1] == nchannels\n data_int16 = convert_dtype(data, np.int16)\n dataset.append(data_int16)\n \n kwd.createGroup(recording, 'filter')\n # TODO: filter\n if to_close:\n kwd.close()\n \n return kwd\n \ndef add_event_type(fd, id=None, evt=None):\n \"\"\"fd is returned by `open_files`: it is a dict {type: tb_file_handle}.\"\"\"\n kwik = fd.get('kwik', None)\n # The KWIK needs to be there.\n assert kwik is not None\n if id is None:\n # If id is None, take the maximum integer index among the existing\n # recording names, + 1.\n event_types = sorted([n._v_name \n for n in kwik.listNodes('/event_types')])\n if event_types:\n id = str(max([int(r) for r in event_types if r.isdigit()]) + 1)\n else:\n id = '0'\n event_type = kwik.createGroup('/event_types', id)\n \n kwik.createGroup(event_type, 'user_data')\n \n app = kwik.createGroup(event_type, 'application_data')\n kv = kwik.createGroup(app, 'klustaviewa')\n kv._f_setAttr('color', None)\n \n events = kwik.createGroup(event_type, 'events')\n kwik.createEArray(events, 'time_samples', tb.UInt64Atom(), (0,))\n kwik.createEArray(events, 'recording', tb.UInt16Atom(), (0,))\n kwik.createGroup(events, 'user_data')\n \ndef add_cluster(fd, channel_group_id=None, id=None, clustering='main',\n cluster_group=None, color=None,\n mean_waveform_raw=None,\n mean_waveform_filtered=None,\n ):\n \"\"\"fd is returned by `open_files`: it is a dict {type: tb_file_handle}.\"\"\"\n if channel_group_id is None:\n channel_group_id = '0'\n kwik = fd.get('kwik', None)\n # The KWIK needs to be there.\n assert kwik is not None\n # The channel group id containing the new cluster group must be specified.\n assert channel_group_id is not None\n clusters_path = '/channel_groups/{0:s}/clusters/{1:s}'.format(\n channel_group_id, clustering)\n if id is None:\n # If id is None, take the maximum integer index among the existing\n # recording names, + 1.\n clusters = sorted([n._v_name \n for n in kwik.listNodes(clusters_path)])\n if clusters:\n id = str(max([int(r) for r in clusters if r.isdigit()]) + 1)\n else:\n id = '0'\n cluster = kwik.createGroup(clusters_path, id)\n \n cluster._f_setAttr('cluster_group', cluster_group)\n cluster._f_setAttr('mean_waveform_raw', mean_waveform_raw)\n cluster._f_setAttr('mean_waveform_filtered', mean_waveform_filtered)\n \n # TODO\n quality = kwik.createGroup(cluster, 'quality_measures')\n quality._f_setAttr('isolation_distance', None)\n quality._f_setAttr('matrix_isolation', None)\n quality._f_setAttr('refractory_violation', None)\n quality._f_setAttr('amplitude', None)\n \n kwik.createGroup(cluster, 'user_data')\n \n app = kwik.createGroup(cluster, 'application_data')\n kv = kwik.createGroup(app, 'klustaviewa')\n kv._f_setAttr('color', color or ((int(id) % (COLORS_COUNT - 1)) + 1))\n \ndef add_cluster_group(fd, channel_group_id=None, id=None, clustering='main',\n name=None, color=None):\n \"\"\"fd is returned by `open_files`: it is a dict {type: tb_file_handle}.\"\"\"\n if channel_group_id is None:\n channel_group_id = '0'\n kwik = fd.get('kwik', None)\n # The KWIK needs to be there.\n assert kwik is not None\n # The channel group id containing the new cluster group must be specified.\n assert channel_group_id is not None\n cluster_groups_path = '/channel_groups/{0:s}/cluster_groups/{1:s}'.format(\n channel_group_id, clustering)\n if id is None:\n # If id is None, take the maximum integer index among the existing\n # recording names, + 1.\n cluster_groups = sorted([n._v_name \n for n in kwik.listNodes(cluster_groups_path)])\n if cluster_groups:\n id = str(max([int(r) for r in cluster_groups if r.isdigit()]) + 1)\n else:\n id = '0'\n # Default name: cluster_group_X if X is an integer, or the id.\n if name is None:\n if id.isdigit():\n name = 'cluster_group_{0:s}'.format(id)\n else:\n name = id\n cluster_group = kwik.createGroup(cluster_groups_path, id)\n cluster_group._f_setAttr('name', name)\n \n kwik.createGroup(cluster_group, 'user_data')\n \n app = kwik.createGroup(cluster_group, 'application_data')\n kv = kwik.createGroup(app, 'klustaviewa')\n kv._f_setAttr('color', color or ((int(id) % (COLORS_COUNT - 1)) + 1))\n \ndef _normalize_inplace(x):\n if x is None:\n return\n if x.dtype in (np.float32, np.float64):\n m, M = x.min(), x.max()\n c = max(np.abs(m), np.abs(M))\n x /= float(c)\n\ndef add_spikes(fd, channel_group_id=None, clustering='main',\n time_samples=None, time_fractional=0,\n recording=0, cluster=0, cluster_original=0,\n features_masks=None, features=None, masks=None,\n waveforms_raw=None, waveforms_filtered=None,):\n \"\"\"fd is returned by `open_files`: it is a dict {type: tb_file_handle}.\"\"\"\n if channel_group_id is None:\n channel_group_id = '0'\n kwik = fd.get('kwik', None)\n kwx = fd.get('kwx', None)\n # The KWIK needs to be there.\n assert kwik is not None\n # The channel group id containing the new cluster group must be specified.\n assert channel_group_id is not None\n\n spikes = kwik.root.channel_groups.__getattr__(channel_group_id).spikes\n \n time_samples = ensure_vector(time_samples)\n nspikes = len(time_samples)\n \n ds_features_masks = kwx.root.channel_groups.__getattr__(channel_group_id).features_masks\n ds_waveforms_raw = kwx.root.channel_groups.__getattr__(channel_group_id).waveforms_raw\n ds_waveforms_filtered = kwx.root.channel_groups.__getattr__(channel_group_id).waveforms_filtered\n \n nfeatures = ds_features_masks.shape[1]\n \n if features_masks is None:\n # Default features and masks\n if features is None:\n features = np.zeros((nspikes, nfeatures), dtype=np.float32)\n if masks is None:\n masks = np.zeros((features.shape[0], nfeatures), dtype=np.float32)\n \n # Ensure features and masks have the right number of dimensions.\n # features.shape is (1, nfeatures)\n # masks.shape is however (nchannels,)\n if features.ndim == 1:\n features = np.expand_dims(features, axis=0)\n if masks.ndim == 1:\n masks = np.expand_dims(masks, axis=0)\n \n # masks.shape is now (1,nchannels,)\n # Tile the masks if needed: same mask value on each channel.\n if masks.shape[1] < features.shape[1]:\n nfeatures_per_channel = features.shape[1] // masks.shape[1]\n masks = np.repeat(masks, nfeatures_per_channel, axis = 1)\n # masks.shape is (1, nfeatures) - what we want\n # Concatenate features and masks\n features_masks = np.dstack((features, masks))\n \n \n time_fractional = ensure_vector(time_fractional, size=nspikes)\n recording = ensure_vector(recording, size=nspikes)\n cluster = ensure_vector(cluster, size=nspikes)\n cluster_original = ensure_vector(cluster_original, size=nspikes)\n \n if waveforms_raw is None:\n waveforms_raw = empty_row(ds_waveforms_raw, nrows=nspikes)\n if waveforms_raw.ndim < 3:\n waveforms_raw = np.expand_dims(waveforms_raw, axis=0)\n \n if waveforms_filtered is None:\n waveforms_filtered = empty_row(ds_waveforms_filtered, nrows=nspikes)\n if waveforms_filtered.ndim < 3:\n waveforms_filtered = np.expand_dims(waveforms_filtered, axis=0)\n \n # Make sure we add the correct number of rows to every object.\n assert len(time_samples) == nspikes\n assert len(time_fractional) == nspikes\n assert len(recording) == nspikes\n assert len(cluster) == nspikes\n assert len(cluster_original) == nspikes\n assert features_masks.shape[0] == nspikes\n assert waveforms_raw.shape[0] == nspikes\n assert waveforms_filtered.shape[0] == nspikes\n \n # WARNING: need to normalize the waveforms before converting them to\n # int16. They need to be in [-1,1].\n # _normalize_inplace(waveforms_raw)\n # _normalize_inplace(waveforms_filtered)\n \n spikes.time_samples.append(time_samples)\n spikes.time_fractional.append(time_fractional)\n spikes.recording.append(recording)\n spikes.clusters.main.append(cluster)\n spikes.clusters.original.append(cluster_original)\n ds_features_masks.append(features_masks)\n \n # HACK: disable normalization for the time being, to try to\n # retrieve old output of SD1\n # ds_waveforms_raw.append(convert_dtype(waveforms_raw, np.int16))\n # ds_waveforms_filtered.append(convert_dtype(waveforms_filtered, np.int16))\n ds_waveforms_raw.append(waveforms_raw.astype(np.int16))\n ds_waveforms_filtered.append(waveforms_filtered.astype(np.int16))\n"
},
{
"alpha_fraction": 0.6666666865348816,
"alphanum_fraction": 0.6770833134651184,
"avg_line_length": 23,
"blob_id": "dcf3a3f22ad9b3201a8b53c63e192e0c1e1533ab",
"content_id": "3b470ed557f3f70d6d6072398eaf3bffad08003a",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Markdown",
"length_bytes": 96,
"license_type": "no_license",
"max_line_length": 68,
"num_lines": 4,
"path": "/Spikes/spikedetekt2/README.md",
"repo_name": "sapphire008/Python",
"src_encoding": "UTF-8",
"text": "spikedetekt2\n============\n\nThe documentation is [here](https://github.com/klusta-team/example).\n"
},
{
"alpha_fraction": 0.524135947227478,
"alphanum_fraction": 0.5349900126457214,
"avg_line_length": 35.072166442871094,
"blob_id": "e304e2d870b5a3dba793e5016dcae42149eb15e6",
"content_id": "db2ff1cf6fba8d630d660db4267c93c5ad941344",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 3501,
"license_type": "no_license",
"max_line_length": 79,
"num_lines": 97,
"path": "/Spikes/spikedetekt2/spikedetekt2/processing/pca.py",
"repo_name": "sapphire008/Python",
"src_encoding": "UTF-8",
"text": "\"\"\"PCA routines.\"\"\"\n\n# -----------------------------------------------------------------------------\n# Imports\n# -----------------------------------------------------------------------------\nimport numpy as np\nfrom scipy import signal\nfrom kwiklib.utils.six.moves import range\n\n\n# -----------------------------------------------------------------------------\n# PCA functions\n# -----------------------------------------------------------------------------\ndef compute_pcs(x, npcs=None, masks=None):\n \"\"\"Compute the PCs of an array x, where each row is an observation.\n x can be a 2D or 3D array. In the latter case, the PCs are computed\n and concatenated iteratively along the last axis.\"\"\"\n\n # Ensure x is a 3D array.\n if x.ndim == 2:\n x = x[..., None]\n assert x.ndim == 3\n # Ensure double precision\n x = x.astype(np.float64)\n\n nspikes, nsamples, nchannels = x.shape\n\n if masks is not None:\n assert isinstance(masks, np.ndarray)\n assert masks.ndim == 2\n assert masks.shape[0] == x.shape[0] # number of spikes\n assert masks.shape[1] == x.shape[2] # number of channels\n\n # Compute regularization cov matrix.\n if masks is not None:\n unmasked = masks > 0\n # The last dimension is now time. The second dimension is channel.\n x_swapped = np.swapaxes(x, 1, 2)\n # This is the list of all unmasked spikes on all channels.\n # shape: (n_unmasked_spikes, nsamples)\n unmasked_all = x_swapped[unmasked, :]\n # Let's compute the regularization cov matrix of this beast.\n # shape: (nsamples, nsamples)\n cov_reg = np.cov(unmasked_all, rowvar=0)\n else:\n cov_reg = np.eye(nsamples)\n assert cov_reg.shape == (nsamples, nsamples)\n\n pcs_list = []\n # Loop over channels\n for channel in range(nchannels):\n x_channel = x[:, :, channel]\n # Compute cov matrix for the channel\n if masks is not None:\n # Unmasked waveforms on that channel\n # shape: (n_unmasked, nsamples)\n x_channel = np.compress(masks[:, channel] > 0,\n x_channel, axis=0)\n assert x_channel.ndim == 2\n # Don't compute the cov matrix if there are no unmasked spikes\n # on that channel.\n alpha = 1. / nspikes\n if x_channel.shape[0] <= 1:\n cov = alpha * cov_reg\n else:\n cov_channel = np.cov(x_channel, rowvar=0)\n assert cov_channel.shape == (nsamples, nsamples)\n cov = alpha * cov_reg + cov_channel\n # Compute the eigenelements\n vals, vecs = np.linalg.eigh(cov)\n pcs = vecs.T.astype(np.float32)[np.argsort(vals)[::-1]]\n # Take the first npcs components.\n if npcs is not None:\n pcs_list.append(pcs[:npcs,...])\n else:\n pcs_list.append(pcs)\n # Return the concatenation of the PCs on all channels, along the 3d axis,\n # except if there is only one element in the 3d axis. In this case\n # we convert to a 2D array.\n pcs = np.dstack(pcs_list)\n assert pcs.ndim == 3\n if pcs.shape[2] == 1:\n pcs = pcs[:, :, 0]\n assert pcs.ndim == 2\n return pcs\n\ndef project_pcs(x, pcs):\n \"\"\"Project data points onto principal components.\n\n Arguments:\n * x: a 2D array.\n * pcs: the PCs as returned by `compute_pcs`.\n\n \"\"\"\n x_proj = np.einsum('ijk,jk->ki', pcs, x) # Notice the transposition.\n x_proj *= 100.\n return x_proj\n\n\n"
},
{
"alpha_fraction": 0.6281690001487732,
"alphanum_fraction": 0.6773819327354431,
"avg_line_length": 36.948387145996094,
"blob_id": "4eb490e11b9b148bae98522c5b4512c69928844d",
"content_id": "e851ee3bea4441b600f597bba9f4a934abf67c42",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 6035,
"license_type": "no_license",
"max_line_length": 114,
"num_lines": 155,
"path": "/PySynapse/util/ITC.py",
"repo_name": "sapphire008/Python",
"src_encoding": "UTF-8",
"text": "# revised 23 June 2015 BWS\r\n\r\nfrom ctypes import *\r\nimport os.path as path\r\nimport time\r\n\r\nclass PyITC():\r\n\tdef __init__(self, driverPath):\r\n\t\tself.initITC(driverPath)\r\n\r\n\tdef initITC(self, driverPath):\r\n\t\tif path.isfile(driverPath):\r\n\t\t\t# typically driverPath is D:/LabWorld/Executables/ITCMM64.dll\r\n\t\t\tself.lib=cdll.LoadLibrary(driverPath)\r\n\t\t\tself.itc = self.lib.ITC18_GetStructureSize()\r\n\t\t\tself.itc = create_string_buffer(self.itc)\r\n\t\t\tstatus = self.lib.ITC18_Open(byref(self.itc), 0)\r\n\t\t\tif status != 0:\r\n\t\t\t\tprint(\"problem with opening ITC18\")\r\n\t\t\tstatus = self.lib.ITC18_Initialize(byref(self.itc), 0)\r\n\t\t\tif status != 0:\r\n\t\t\t\tprint(\"problem with initialize ITC18\")\r\n\t\t\tself.FIFOsize = self.lib.ITC18_GetFIFOSize(byref(self.itc))\r\n\t\t\tprint(\"ITC started with FIFO = \" + str(self.FIFOsize))\r\n\t\telse:\r\n\t\t\tprint(\"could not find ITC18 dll at \" + driverPath)\r\n\t\t\tself.lib = None\r\n\r\n\tdef runITC(self, parmDict):\r\n\t\t# parmDict must contain values for these keys: msPerPoint, sweepWindowMs, extTrig0or1,\r\n\t\t# activeADCs (list of up to 8 numbers), activeDACs (list up to 4), ADCfullScale (list to match # of activeADCs),\r\n\t\t# ADCnames (list), ADCmultiplyFactors (list), stimDict (dict with TTL, DAC0 etc entries),\r\n\t\t# saveFileName (set to None for return trace Dict), protocol (Dict to convert to INI file)\r\n\r\n\t\t# ToDo add flag in parm dict to repeat last episode without reloading everything\r\n\r\n\t\tnumInstructions = 4\r\n\t\tinstructions = (c_int*numInstructions)()\r\n\t\tinstructions[0] = 0x780 | 0x0\r\n\t\tinstructions[1] = 0x780 | 0x800\r\n\t\tinstructions[2] = 0x780 | 0x1000\r\n\t\tinstructions[3] = 0x780 | 0x1800 | 0x4000 | 0x8000\r\n\t\tsamplingRate = int((100. / numInstructions) / 1.25)\r\n\t\tADCranges = (c_int*8)() # 0=10V 1=5V 2=2V 3=1V full scale\r\n\t\twaitForExtTrig = 0\r\n\t\tjunkBeginning = 20 \r\n\t\tstimSize = 50 * numInstructions\r\n\t\tstimData = (c_short*stimSize)() # this needs to be a c_short to keep at 16 bit words\r\n\t\tdacValues = (c_short*numInstructions)()\r\n\t\tdacValues[0] = 0\r\n\t\tdacValues[1] = 0\r\n\t\tdacValues[2] = 2000\r\n\t\tdacValues[3] = 0\r\n\t\tfor index in range(0, stimSize, numInstructions):\r\n\t\t\tfor subindex in range(numInstructions):\r\n\t\t\t\tstimData[index + subindex] = dacValues[subindex]\r\n\r\n\t\tstatus = self.lib.ITC18_SetSequence(byref(self.itc), c_int(len(instructions)), byref(instructions))\r\n\t\tif status != 0:\r\n\t\t\tprint(\"Problem with SetSequence ITC18 command\")\r\n\t\tstatus = self.lib.ITC18_SetSamplingInterval(byref(self.itc), c_int(samplingRate))\r\n\t\tif status != 0:\r\n\t\t\tprint(\"Problem with SetSamplingInterval ITC18 command\")\r\n\t\tstatus = self.lib.ITC18_SetRange(byref(self.itc), byref(ADCranges))\r\n\t\tif status != 0:\r\n\t\t\tprint(\"Problem with SetRange ITC18 command\")\r\n\t\tstatus = self.lib.ITC18_InitializeAcquisition(byref(self.itc))\r\n\t\tif status != 0:\r\n\t\t\tprint(\"Problem with InitializeAcquisition ITC18 command\")\r\n\t\tstatus = self.lib.ITC18_WriteFIFO(byref(self.itc), c_int(stimSize), byref(stimData))\r\n\t\tif status != 0:\r\n\t\t\tprint(\"Problem with WriteFIFO ITC18 command\")\r\n\t\tstatus = self.lib.ITC18_Start(byref(self.itc), c_int(0), c_int(1), c_int(1), c_int(0))\r\n\t\tif status != 0:\r\n\t\t\tprint(\"Problem with Start ITC18 command\")\r\n\t\ttime.sleep(1)\r\n\t\tstatus = self.lib.ITC18_Stop(byref(self.itc))\r\n\t\tif status != 0:\r\n\t\t\tprint(\"Problem with Stop ITC18 command\")\r\n\r\n\r\n\t\t\r\n\r\n\tdef runITClowLevel(self, instructions, samplingRate, waitForExtTrig, ADCranges, stimData):\r\n\t\tstatus = self.lib.ITC18_SetSequence(byref(self.itc), c_int(len(instructions)), byref(instructions))\r\n\t\tif status != 0:\r\n\t\t\tprint(\"Problem with SetSequence ITC18 command\")\r\n\t\tstatus = self.lib.ITC18_SetSamplingInterval(byref(self.itc), c_int(samplingRate))\r\n\t\tif status != 0:\r\n\t\t\tprint(\"Problem with SetSamplingInterval ITC18 command\")\r\n\t\tstatus = self.lib.ITC18_SetRange(byref(self.itc), byref(ADCranges))\r\n\t\tif status != 0:\r\n\t\t\tprint(\"Problem with SetRange ITC18 command\")\r\n\t\tstatus = self.lib.ITC18_InitializeAcquisition(byref(self.itc))\r\n\t\tif status != 0:\r\n\t\t\tprint(\"Problem with InitializeAcquisition ITC18 command\")\r\n\t\tstatus = self.lib.ITC18_WriteFIFO(byref(self.itc), c_int(len(stimData)), byref(stimData))\r\n\t\tif status != 0:\r\n\t\t\tprint(\"Problem with WriteFIFO ITC18 command\")\r\n\t\tstatus = self.lib.ITC18_Start(byref(self.itc), c_int(0), c_int(1), c_int(1), c_int(0))\r\n\t\tif status != 0:\r\n\t\t\tprint(\"Problem with Start ITC18 command\")\r\n\t\ttime.sleep(1)\r\n\t\tstatus = self.lib.ITC18_Stop(byref(self.itc))\r\n\t\tif status != 0:\r\n\t\t\tprint(\"Problem with Stop ITC18 command\")\r\n\r\n\r\n\tdef setDACvalues(self, newDACvaluesList):\r\n\t\tnumInstructions = 4\r\n\t\tinstructions = (c_int*numInstructions)()\r\n\t\tinstructions[0] = 1920\r\n\t\tinstructions[1] = 1920 | 2048\r\n\t\tinstructions[2] = 1920 | 4098\r\n\t\tinstructions[3] = 1920 | 6144 | 16384 | 32768\r\n\t\tsamplingRate = int((100. / numInstructions) / 1.25)\r\n\t\tADCranges = (c_int*8)()\r\n\t\twaitForExtTrig = 0\r\n\t\tjunkBeginning = 20 \r\n\t\tstimSize = 50 * numInstructions\r\n\t\tstimData = (c_int*stimSize)()\r\n\t\tfor index in range(stimSize):\r\n\t\t\tstimData[index] = 2500\r\n\t\tretValue = self.runITClowLevel(instructions, samplingRate, waitForExtTrig, ADCranges, stimData)\r\n\r\n\tdef closeITC(self):\r\n\t\tstatus = self.lib.ITC18_SetReadyLight(byref(self.itc), 0)\r\n\t\tif status != 0:\r\n\t\t\tprint(\"Problem with turn off ready light\")\r\n\t\tstatus = self.lib.ITC18_Close(byref(self.itc))\r\n\t\tif status != 0:\r\n\t\t\tprint(\"Problem with ITC18 close\")\r\n\t\telse:\r\n\t\t\tprint(\"ITC18 closed\")\r\n \r\nif __name__ == '__main__':\r\n driverPath = 'D:/Edward/Documents/Assignments/Scripts/Python/PySynapse/resources/lib/ITCMM64.dll'\r\n ITC = PyITC(driverPath)\r\n \r\n itcParm = {}\r\n itcParm[\"saveFileName\"] = None\r\n itcParm[\"msPerPoint\"] = 0.2\r\n itcParm[\"sweepWindowMs\"] = 500\r\n itcParm[\"activeADCs\"] = [0, 1]\r\n itcParm[\"activeDACs\"] = [0]\r\n itcParm[\"ADCfullScale\"] = [10, 10]\r\n itcParm[\"ADCnames\"] = [\"CurA\", \"VoltA\"]\r\n itcParm[\"ADCmultiplyFactors\"] = [5.1, 0.123]\r\n stimDict = {}\r\n stimDict[\"DAC0\"] = \"step 100 350 100\"\r\n itcParm[\"stimDict\"] = stimDict\r\n #retValue = ITC.runITC(itcParm)\r\n ITC.runITC([2, 4, 3, 0])\r\n ITC.closeITC()\r\n print(\"done\")"
},
{
"alpha_fraction": 0.4428076148033142,
"alphanum_fraction": 0.5095320343971252,
"avg_line_length": 27.58974266052246,
"blob_id": "17ffe483754b8732319a413cb942a503d6433d07",
"content_id": "1cdc0941e76acb87768cf98a65fb6254490c06c0",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 1154,
"license_type": "no_license",
"max_line_length": 145,
"num_lines": 39,
"path": "/Plots/simple/horizon.py",
"repo_name": "sapphire008/Python",
"src_encoding": "UTF-8",
"text": "import numpy as np\r\nimport matplotlib.pyplot as plt\r\n\r\ncolors = ['#1f77b4','#ff7f0e', '#2ca02c','#d62728','#9467bd','#8c564b','#e377c2','#7f7f7f','#bcbd154','#17becf'] # tableau10, or odd of tableau20\r\n\r\ndef layer(y,height):\r\n neg=0.0;pos=0.0\r\n if y>0:\r\n if y-height>=0:\r\n pos=height\r\n y-= pos\r\n else :\r\n pos = y\r\n elif y<0:\r\n if y+height<=0:\r\n neg=height\r\n y += neg\r\n else :\r\n neg = -y\r\n return pos,neg\r\n\r\ndef horizonPlot(x,y,height=50.0,colors = colors, alpha = .10):\r\n vlayer = np.vectorize(layer)\r\n while (y != 0).any():\r\n l = vlayer(y,height)\r\n y -= l[0];y += l[1]\r\n plt.fill_between(x,0,l[0],color=colors[0], alpha=alpha)\r\n plt.fill_between(x,height-l[1],height,color=colors[1], alpha=alpha)\r\n\r\n# Example\r\nif __name__ == '__main__':\r\n fig, ax = plt.subplots(nrows=1,ncols=1)\r\n fig.set_size_inches(3,2)\r\n x = np.linspace(0, np.pi*4, 137)\r\n y = (2*np.random.normal(size=137) + x**2)\r\n xx = np.hstack([-1*x[::-1], x])\r\n yy = np.hstack([-1*y[::-1], y])\r\n horizonPlot(xx,yy)\r\n plt.show()\r\n"
},
{
"alpha_fraction": 0.6899372339248657,
"alphanum_fraction": 0.7002092599868774,
"avg_line_length": 37.37956237792969,
"blob_id": "7b61ec82badebd548da96e047a5e42ad7c1f5cc6",
"content_id": "b11ff0cb35e26b943d1b16bb797db0ddd2432ee7",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 5257,
"license_type": "no_license",
"max_line_length": 80,
"num_lines": 137,
"path": "/python_tutorials/practice_notes_6.py",
"repo_name": "sapphire008/Python",
"src_encoding": "UTF-8",
"text": "# Python 3.3.0 Practice Notes\n# Day 6: December 29, 2012\n\n# Write a file\nfout=open('output.txt','w');#'w' specifies the file can be overwritte\nprint(fout)\n#>>><_io.TextIOWrapper name='output.txt' mode='w' encoding='cp936'>\nline_1=\"This here's the wattle,\\n\";#\\n starts a new line\nfout.write(line_1);#writes line_1 to the first line of the txt file\n#note that for method .write(input), the input must be a string\nline_2=\"the emblem of our land.\\n\";\nfout.write(line_2);#this will append line_2 to the second line of the txt file\nfout.close();#this close the file\n\n#format operators and sequence%: %first operand %second operand\n# '%d': things that follow this (second operand) should be a integer.\n #even if the specified second operand is a decimal, it will convert\n #the decimal into a integer by truncating the number after decimal\ncamels=42; #integer\n'%d' %camels #%format sequence(first operand) %second operand\n#>>>'42'\n#the place where this format sequence can appear anywhere\nA='I have spotted %d camels' %camels;\nprint(A);\n#>>>I have spotted 42 camels\n\n# '%g': things that follow this (second operand) will be formatted to decimal\n# '%s': second operand will be formatted as a string\n\nA='In %d years, I have spotted %g %s.' %(3,0.1,'camels');\nprint(A);\n#>>>In 3 years, I have spotted 0.1 camels.\n#note that the nubmer of format sequence has to match the number of elements\n#in the tuple. The types also have to match\n\n# File names and paths\nimport os; #importing module for working with files and dir\ncurrent_working_directory=os.getcwd();\nprint(current_working_directory);\n#>>>C:\\Users\\Edward\\Documents\\Assignments\\Python\n#>>><class 'str'>\nos.path.abspath('output.txt');#find absolute path\n#>>>'C:\\\\Users\\\\Edward\\\\Documents\\\\Assignments\\\\Python\\\\output.txt'\nos.path.exists('output.txt');#check if some path exists\n#>>>True\nos.path.isdir('output.txt');#check if a path is a directory\n#>>>False\nos.path.isfile('output.txt');#check if a path is a file\n#>>>True\nos.listdir(current_working_directory);#returns a list of files in cwd\n#>>>['output.txt', 'practice_notes_1.py', 'practice_notes_6.py']\n#example function\ndef walk(dirname):\n for name in os.listdir(dirname):\n path=os.path.join(dirname,name);#join dir and file name\n if os.path.isfile(path) #if the one we have is a file\n print(path); #print the file name\n else: #if it is a directory\n walk(path);#otherwise, walk in the subdirectory given by file\n\n# Catching exceptions\ntry: #try to do the following\n fin = open('bad_file');\n for line in fin:\n print(line);\n fin.close();\nexcept: #like catch in MATLAB, do the following if error occurs\n print('Something went wrong');\n#Exercise 14.2\ndef sed(strPat,strRep,fin,fout):\n try:\n fin_open=open(fin,'r');\n fout_open=open(fout,'w');\n for line in fin_open:\n line.replace(strPat,strRep)\n fout_open.write(line);\n fin_open.close();\n fout_open.close();\n except:\n print(fin,'does not exist!');\n \n\n# Database\nimport anydbm;#for managing database\ndb=anydbm.open('captions.db','c');#'c' for creating the database if not exists\n#The database should work like a dictionary\ndb['cleese.png']='Photo of John Cleese.';\n#many dictionary methods, such as .key(), ,value() also work on database\n#keys and values must both be strings\n#after modifying the database, we must close it\ndb.close();\n\n# Pickling\n#pickle is a module that can convert any object into a string, then store in\n#the database; it is also able to convert the string back to object\nimport pickle;\nt=[1,2,3];\ns=pickle.dumps(t);#dump the object 't' into a string 's'\nprint(s);\n#>>>b'\\x80\\x03]q\\x00(K\\x01K\\x02K\\x03e.'\n#though not making much sense to human\nt_back=pickle.loads(s);#convert the string back to object\nprint(t_back);\n#>>>[1, 2, 3]\n#note that t and t_back have the same value, but they are not the same object\n#it has the same effect as copying the object\nt==t_back;\n#>>>True\nt is t_back;\n#>>>False\n#shelve module will incoporate both anydbm and pickle that it converts any\n#object to strings to store in the database, and retrieve them by converting\n#the stored strings back to the object. It appears as if the object is stored\n#as is in the database\n\n# Pipe\nimport subprocess;#a newer module that replaces os\n\n# Modules\n#Modules are simply .py scripts with a bunch of functions\n#to prevent the modules (scripts) execute itself, we may enclose the protion\n#of the code that gives output with\nif __name__=='__main__';#note there are two underlines before and after both\n #'name' and 'main'. Also, '__name___' is built-in\n #variable\n print('whatever we want');\n\n#this means, when we try to run the script from a shell, the variable\n#__name__ has a value of '__main__', whereas when we import the script as a\n#module, the script should not have that value\n#usually, what is being enclosed are the test scripts of each function\n\n#Important note: if the module has already been imported, calling import will\n#do nothing, even if the module scripts have been changed after the first import\n#the best way is to restart the program and reimport everything\n\n#This concludes today's study."
},
{
"alpha_fraction": 0.6277711391448975,
"alphanum_fraction": 0.6649191379547119,
"avg_line_length": 42.921051025390625,
"blob_id": "54f154921f92b2e4570c27e6548a7f618ffbeac8",
"content_id": "e17590dbfd88405a05dadca991af0f0fdee2a963",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 6676,
"license_type": "no_license",
"max_line_length": 142,
"num_lines": 152,
"path": "/fMRI_pipeline/space_time_realign.py",
"repo_name": "sapphire008/Python",
"src_encoding": "UTF-8",
"text": "#!/usr/bin/env python\n# emacs: -*- mode: python; py-indent-offset: 4; indent-tabs-mode: nil -*-\n# vi: set ft=python sts=4 ts=4 sw=4 et:\n\nPYTHONPKGPATH = '/hsgs/projects/jhyoon1/pkg64/pythonpackages/'\n\n#from __future__ import print_function # Python 2/3 compatibility\nimport sys,os\nsys.path.append(os.path.join(PYTHONPKGPATH,'nibabel-1.30'))\nimport nibabel# required for nipy\nsys.path.append(os.path.join(PYTHONPKGPATH,'nipy-0.3'))\nimport numpy as np\nfrom nipy.algorithms.registration import SpaceTimeRealign\nfrom nipy.algorithms.slicetiming import timefuncs\nfrom nipy import load_image, save_image\n\n# Input images\ndef space_time_realign(Images,TR=2,numslices=None,SliceTime='asc_alt_2',RefScan=0,Prefix='ra'):\n '''\n 4D simultaneous slice timing and spatial realignment. Adapted from\n Alexis Roche's example script, and extend to be used for multiplex\n imaging sequences\n \n Inputs:\n \n Images: list of images, input as a list of strings/paths to images\n \n numslices: for non-multiplex sequence, default to be the number of\n slices in the image. For multiplex sequence, enter as a tuple,\n such that the first element is the number of planes acquired in\n parallel between each other, and the second element is the number\n of slices of each parallel plane/slab, i.e. (numplanes,numslices)\n \n SliceTime:enter as a string to specify how the slices are ordered.\n Choices are the following\n 1).'ascending': sequential ascending acquisition\n 2).'descending': sequential descending acquisition\n 3).'asc_alt_2': ascending interleaved, starting at first slice\n 4).'asc_alt_2_1': ascending interleaved, starting at the second\n slice\n 5).'desc_alt_2': descending interleaved, starting at last slice\n 6).'asc_alt_siemens': ascending interleaved, starting at the first\n slice if odd number of slices, or second slice if even number\n of slices\n 7).'asc_alt_half': ascending interleaved by half the volume\n 8).'desc_alt_half': descending interleaved by half the volume\n \n RefScan: reference volume for spatial realignment movement estimation.\n Note that scan 0 is the first scan.\n \n Prefix: prefix of the new corrected images. Default is 'ra'\n \n \n Author: Alexis Roche, 2009.\n Edward Cui, February 2014\n '''\n \n # Load images\n runs = [load_image(run) for run in Images]\n # Parse data info\n if numslices is None:\n numslices = runs[0].shape[2]\n numplanes = 1\n elif isinstance(numslices,tuple):\n (numplanes,numslices) = numslices\n else:\n numplanes = 1\n # Print image info\n if numplanes>1:\n print('Running multiplex: %s' % numplanes)\n print('Number of slices: %s' % numslices)\n # Parse slice timing according to the input\n slice_timing = getattr(timefuncs,SliceTime)(numslices,TR)\n # Repeat the slice timing for multiplex seqquence\n slice_timing = np.tile(slice_timing,numplanes)\n # Print slice timing info\n print('Slice times: %s' % slice_timing)\n # Spatio-temporal realigner\n R = SpaceTimeRealign(runs, tr=TR, slice_times=slice_timing, slice_info=2)\n # Estimate motion within- and between-sessions\n print('Estimating motion ...')\n R.estimate(refscan=RefScan)\n # Resample data on a regular space+time lattice using 4d interpolation\n fname=[None]*len(Images) # output images\n mfname=[None]*len(Images) # output motion parameter files\n print('Saving results ...')\n for n in range(len(Images)):\n # extract motion parameters\n motionparams = np.array([np.concatenate((M.translation,M.rotation),axis=1) for M in R._transforms[n]])\n # set motion parameter file name\n mfname[n] = os.path.join(os.path.split(Images[n])[0], 'rp_a0001.txt')\n # write the motion parameters to file\n np.savetxt(mfname[n],motionparams,fmt='%10.7e',delimiter='\\t')\n # resample data\n corr_run = R.resample(n)\n # set image name\n fname[n] = os.path.join(os.path.split(Images[n])[0], Prefix + os.path.split(Images[n])[1])\n # save image\n save_image(corr_run, fname[n])\n print(fname[n])\n return(fname,mfname)\n\n## help with interface from system environment\n#import re\n#if __name__ == '__main__':\n# try:\n# Images = re.split(sys.argv[0])\n# except:\n# Images = None\n# try:\n# TR = float(sys.argv[1])\n# except:\n# TR = 2\n# try:\n# numslices = tuple(sys.argv[2])\n# except:\n# numslices = None\n# try:\n# SliceTime = str(sys.argv[3])\n# except:\n# SliceTime = 'asc_alt_2'\n# try:\n# RefScan = float(sys.argv[4])\n# except:\n# RefScan = 0\n# try:\n# Prefix = str(sys.argv[5])\n# except:\n# Prefix = 'ra'\n# sys.stdout.write(str(space_time_realign(Images,TR,numslices,SliceTime,RefScan,Prefix)))\n \n\n# whole image\nImages = ['/hsgs/projects/jhyoon1/midbrain_Stanford_3T/stop_signal/subjects/funcs/M3020_CNI_011314_adjusted_scale_factor/block1/6093_4_1.nii',\n '/hsgs/projects/jhyoon1/midbrain_Stanford_3T/stop_signal/subjects/funcs/M3020_CNI_011314_adjusted_scale_factor/block2/6093_5_1.nii',\n '/hsgs/projects/jhyoon1/midbrain_Stanford_3T/mid/subjects/funcs/M3020_CNI_011314_adjusted_scale_factor/block1/6093_6_1.nii',\n '/hsgs/projects/jhyoon1/midbrain_Stanford_3T/mid/subjects/funcs/M3020_CNI_011314_adjusted_scale_factor/block2/6093_7_1.nii',\n '/hsgs/projects/jhyoon1/midbrain_Stanford_3T/mid/subjects/funcs/M3020_CNI_011314_adjusted_scale_factor/block3/6093_8_1.nii',\n '/hsgs/projects/jhyoon1/midbrain_Stanford_3T/RestingState/subjects/funcs/M3020_CNI_011314_adjusted_scale_factor/6093_9_1.nii']\nfname = space_time_realign(Images,TR=2,numslices=(3,25),SliceTime='asc_alt_2',RefScan=0,Prefix='ra')\n\n## slab 1\n#Images = ['/hsgs/projects/jhyoon1/midbrain_Stanford_3T/RestingState/subjects/funcs/M3020_CNI_011314/0001_1.nii.gz'];\n#fname = space_time_realign(Images,TR=2,numslices=None,SliceTime='asc_alt_2',RefScan=0,Prefix='ra')\n#\n## slab 2\n#Images = ['/hsgs/projects/jhyoon1/midbrain_Stanford_3T/RestingState/subjects/funcs/M3020_CNI_011314/0001_2.nii.gz'];\n#fname = space_time_realign(Images,TR=2,numslices=None,SliceTime='asc_alt_2',RefScan=0,Prefix='ra')\n#\n## slab 3\n#Images = ['/hsgs/projects/jhyoon1/midbrain_Stanford_3T/RestingState/subjects/funcs/M3020_CNI_011314/0001_3.nii.gz'];\n#fname = space_time_realign(Images,TR=2,numslices=None,SliceTime='asc_alt_2',RefScan=0,Prefix='ra')\n"
},
{
"alpha_fraction": 0.5565671920776367,
"alphanum_fraction": 0.5870848298072815,
"avg_line_length": 33.31547546386719,
"blob_id": "f956a49603260b3fc5cad5a9d7835e7af05e4074",
"content_id": "96f760114855f5624721708b3b025b4e0b15dc34",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 5933,
"license_type": "no_license",
"max_line_length": 91,
"num_lines": 168,
"path": "/Spikes/EC-PC_spk_sort.py",
"repo_name": "sapphire008/Python",
"src_encoding": "UTF-8",
"text": "# -*- coding: utf-8 -*-\r\n\"\"\"\r\nCreated on Thu Nov 19 17:06:23 2015\r\n\r\nSpike sorting EC-PC\r\n\r\nNot working at all. Specifically, the distribution of Hilbert power spectrum\r\nis quite different from what is being noted in the paper. Therefore, the model\r\n/ curve fitting procedure fails. \r\n\r\n@author: Edward\r\n\"\"\"\r\nimport sys\r\nsys.path.append(\"D:/Edward/Documents/Assignments/Scripts/Python/Plots\")\r\nfrom ImportData import NeuroData\r\nsys.path.append(\"D:/Edward/Documents/Assignments/Scripts/Python/Spikes\")\r\nfrom spk_util import *\r\nsys.path.append(\"D:/Edward/Documents/Assignments/Scripts/Python/generic\")\r\nfrom MATLAB import *\r\nfrom matplotlib import pyplot as plt\r\n\r\nimport scipy.signal as sg\r\nimport scipy.optimize as op\r\n\r\ndef spk_snr(Vs, method='rms'):\r\n if method == 'rms':\r\n return( (np.max(Vs) - np.min(Vs)) / rms(Vs) )\r\n else:\r\n raise(NotImplementedError(\"'%s' method is not implemented\"%(method)))\r\n\r\n# Least square solver function\r\ndef lstsq(x, y):\r\n \"\"\"Solve m, c for linear equation\r\n y = mx + c given x, y\r\n \"\"\"\r\n A = np.vstack([x, np.ones(len(x))]).T\r\n m, c = np.linalg.lstsq(A, y)[0]\r\n return(m, c)\r\n \r\n \r\ndef hilbert_Z(Vs):\r\n \"\"\"Calculate power of Hilbert transform\"\"\"\r\n # Data variance\r\n sigma_squared = np.var(Vs)\r\n # Hilbert transform of neurla recording Vs\r\n Vs, l = padzeros(Vs) # pad zeros for faster fft performance\r\n Vst = sg.hilbert(Vs)\r\n # Instantaneous energy in Hilbert space, normalized to data variance\r\n Z = np.abs(Vst)**2 / sigma_squared\r\n Z = Z[0:l]\r\n # Truncate when Z0 < var(Vs)\r\n #Z = Z[Z<sigma_squared]\r\n #Z = Z[Z<40]\r\n return(Z)\r\n\r\ndef spk_detect_EC_PC(Vs, ts, window=5.0, p_thresh=0.95, fz_thresh=1e-6,\r\n z_thresh=3.):\r\n \"\"\"Detect extracellular spikes with EC-PC algorithm.\r\n Inputs:\r\n Vs: time series of recording\r\n ts: sampling rate, in [ms]\r\n window: sliding window to detect spike. Consider this parameter\r\n as the width of the spike. Default 2.0 ms.\r\n p_thresh: probability threshold for spike detection. Default 0.95.\r\n fz_thresh: minimum power to use to calcualte EC-PC. The power needs to\r\n be greater than 0. Default 1e-6.\r\n z_thresh: threshold for power. Use this to filter out low power noise.\r\n Default 5.0.\r\n \r\n Returns:\r\n ind: indices of detected spikes\r\n \r\n The idea is that neural recordings have two components, the exponentially \r\n distributed noise (EC), and power distributed spikes / signal (PC). \r\n This method is claimed to beat other popular threshold based spike\r\n detection algorithms, including RMS, median (Qurioga et al., 2004), \r\n nonlinear energy operator (NEO), and continuous wavelet transform (CWT) \r\n (Nenadic and Burdick, 2005), shown in:\r\n Wing-kin Tam, Rosa So, Cuntai Guan, Zhi Yang. EC-PC spike detection for\r\n high performance Brain-Computer Interface. 2015. IEEE.\r\n \r\n The script is based on the most updated and detailed version of a series\r\n of papers lead by Zhi Yang:\r\n Yin Zhou, Tong Wu, Amir Rastegarnia, Cuntai Guan, Edward Keefer, Zhi Yang.\r\n On the robustness of EC–PC spike detection method for online neural \r\n recording. 2014. Journal of Neuroscience Methods. 235: 316-330.\r\n \"\"\"\r\n # Hilbert transform power Z\r\n Z0 = hilbert_Z(Vs)\r\n \r\n # Use distribution fitting to find a, b, c, lambda1, lambda2\r\n f_Z, Z0 = np.histogram(Z0, bins=500, density=True)\r\n # Center the bin\r\n Z0 = Z0[:-1] + (Z0[1] - Z0[0])/2.\r\n # Take f_Z > 0 only\r\n Z0 = Z0[f_Z>fz_thresh]\r\n f_Z = f_Z[f_Z>fz_thresh]\r\n # Take Z0 > z_thresh only\r\n f_Z = f_Z[Z0>z_thresh]\r\n Z0 = Z0[Z0>z_thresh]\r\n \r\n def f(Z, lambda1, lambda2, a, b, c):\r\n f_n_Z = a*np.exp(-lambda1 * Z)\r\n f_d_Z = b / (Z**((3+2*lambda2)/(2*lambda2)) + c)\r\n # f_d_Z = b / (Z**lambda2 + c)\r\n return(f_n_Z + f_d_Z)\r\n \r\n popt, _ = op.curve_fit(f, Z0, f_Z)\r\n lambda1, lambda2, a, b, c = popt\r\n f_n_Z = a*np.exp(-lambda1 * Z0)\r\n f_d_Z = b / (Z0**((3+2*lambda2)/(2*lambda2)) + c)\r\n # f_d_Z = b / (Z0**lambda2 + c)\r\n p_Z = f_d_Z / (f_d_Z + f_n_Z)\r\n \r\n # Debug plot\r\n fig, ax = plt.subplots(nrows=1, ncols=1)\r\n ax.plot(Z0, f_Z, label='Data')\r\n ax.plot(Z0, f_n_Z, label='EC')\r\n ax.plot(Z0, f_d_Z, label='PC')\r\n ax.plot(Z0, f_d_Z + f_d_Z, label='Sum')\r\n ax.legend()\r\n ax.set_yscale('log')\r\n fig.set_size_inches(20, 6)\r\n \r\n # Slide the window across the time series and calculate the probability\r\n m = int(np.ceil(window / ts / 2.0))\r\n m = np.arange(-m, m+1, 1)\r\n # calculate expected iterations\r\n t_ind = np.arange(-min(m), len(Vs), len(m))\r\n nbin = len(t_ind)\r\n P_vect = np.zeros(nbin)\r\n nbin -= 1\r\n for n, t in enumerate(t_ind):\r\n i = m + t\r\n if n == nbin:\r\n i = i[i<len(Vs)]\r\n # calcualte power of the current window\r\n Z_i = hilbert_Z(Vs[i])\r\n Z_i = max(Z_i) # winner takes all\r\n # Find the closest match of Z in p_Z\r\n P_vect[n] = np.interp(Z_i, Z0, p_Z)\r\n \r\n # Threshold filter the probability vector\r\n ind = np.where(P_vect<p_thresh)[0]\r\n ind = t_ind[ind]\r\n\r\n # Debug plot\r\n fig, ax = plt.subplots(nrows=1, ncols=1)\r\n ax.plot(Vs)\r\n ax.plot(ind,140*np.ones(len(ind)), 'o')\r\n fig.set_size_inches(30,10)\r\n \r\n \r\n \r\nif __name__ == '__main__':\r\n datadir = 'D:/Data/Traces/2015/11.November/Data 20 Nov 2015/Slice C.20Nov15.S1.E10.dat'\r\n # Load data\r\n zData = NeuroData(datadir, old=True)\r\n ts = zData.Protocol.msPerPoint\r\n Vs = zData.Current['A']\r\n #Vs = spk_filter(Vs, ts, Wn=[300., 3000.], btype='bandpass')\r\n Vs = spk_window(Vs, ts, [0,5000])\r\n spk_detect_EC_PC(Vs, ts, window=5.0, p_thresh=0.95, fz_thresh=1e-6,\r\n z_thresh=3.0)\r\n# window=2.0\r\n# p_thresh=0.95\r\n# fz_thresh=1e-6\r\n# z_thresh=5."
},
{
"alpha_fraction": 0.520447313785553,
"alphanum_fraction": 0.5452894568443298,
"avg_line_length": 34.82826614379883,
"blob_id": "2605f58a71f66829d008faf4a8c3c7702c173e28",
"content_id": "c1d2434dce656b87ac8e19a1f8f233e759d7e0a8",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 24233,
"license_type": "no_license",
"max_line_length": 155,
"num_lines": 658,
"path": "/generic/robustica.py",
"repo_name": "sapphire008/Python",
"src_encoding": "UTF-8",
"text": "# -*- coding: utf-8 -*-\r\n\"\"\"\r\nRobustICA Python implementation\r\nAdapted from original author's MATLAB script\r\n\r\nCreated on Mon Dec 21 19:24:57 2015\r\n@author: Edward\r\n\"\"\"\r\n\r\nimport numpy as np\r\n\r\ndef kurt_gradient_optstep(w, X, s, P, wreal, verbose=False):\r\n \"\"\" Computes optimal step size in the gradient-based optimization of the\r\n normalized kurtosis contrast (single iteration).\r\n\r\n Data-based version.\r\n\r\n See references below for details.\r\n\r\n SYNTAX:\r\n g, mu_opt, norm_g = kurt_gradient_optstep(w, X, s, P, wreal)\r\n\r\n INPUTS:\r\n w : current extracting vector coefficients\r\n\r\n X : sensor-output data matrix (one signal per row, one sample\r\n per column)\r\n\r\n s : source kurtosis sign; if zero, the maximum absolute value\r\n of the contrast is sought\r\n\r\n P : projection matrix (used in deflationary orthogonalization;\r\n identity matrix otherwise)\r\n\r\n wreal : if different from zero, keep extracting vector real valued\r\n by retaining only the real part of the gradient (useful,\r\n for instance, in separating real-valued mixtures in the\r\n frequency domain, as in the RobustICA-f algorithm).\r\n\r\n verbose: verbose operation if true\r\n * default: False (quiet operation).\r\n\r\n OUTPUTS:\r\n g : search direction (normalized gradient vector)\r\n\r\n mu_opt : optimal step size globally optimizing the normalized\r\n kurtosis contrast function along direction g from f\r\n\r\n norm_g : non-normalized gradient vector norm.\r\n\r\n REFERENCES:\r\n\r\n - V. Zarzoso and P. Comon, <a href = \"http://www.i3s.unice.fr/~zarzoso/biblio/tnn10.pdf\">\"Robust independent component analysis by iterative maximization</a>\r\n <a href = \"http://www.i3s.unice.fr/~zarzoso/biblio/tnn10.pdf\">of the kurtosis contrast with algebraic optimal step size\"</a>,\r\n IEEE Transactions on Neural Networks, vol. 21, no. 2, pp. 248-261, Feb. 2010.\r\n\r\n - V. Zarzoso, P. Comon and M. Kallel, <a href = \"http://www.i3s.unice.fr/~zarzoso/biblio/eusipco06.pdf\">\"How fast is FastICA?\"</a>,\r\n in: Proceedings EUSIPCO-2006, XIV European Signal Processing Conference,\r\n Florence, Italy, September 4-8, 2006.\r\n\r\n Please, report any bugs, comments or suggestions to\r\n <a href = \"mailto:[email protected]\">zarzoso(a)i3s.unice.fr</a>.\r\n\r\n HISTORY:\r\n\r\n <modification date>: - <modification details>\r\n\r\n -- 2014/11/21: Version 3 release ------------------------------------------\r\n\r\n 2014/06/25: - added 'wreal' input parameter to allow the separation of\r\n real-valued mixtures in complex (e.g., frequency) domain\r\n\r\n -- 2010/02/16: Version 2 release -----------------------------------------\r\n\r\n 2010/02/09: - include gradient norm as output parameter (for use as an\r\n additional termination criterion)\r\n\r\n 2009/03/04: - removed test for constant contrast; sometimes the\r\n algorithm stopped too early, because the contrast was\r\n not actually constant, leading to suboptimal extraction\r\n results\r\n - if best candidate root is complex valued, its real part\r\n can be retained as optimal step size, but contrast is not\r\n guaranteed to increase monotonically in that case; to\r\n avoid this problem, only the real parts of the roots\r\n are considered\r\n\r\n 2009/03/02: - simplified expressions of gradient and optimal step size,\r\n as in TNN paper\r\n\r\n 2008/04/01: - problem encountered on 2008/03/25 using orthogonalization:\r\n due to nearly-zero gradient appearing when just one\r\n source is left, since the contrast function then becomes\r\n constant; normalization after projection in such a case\r\n destroys co-linearity between gradient and extracting\r\n vector (checking for zero gradient should probably be\r\n used as additional termination test in the next version\r\n of the algorithm; see modification on 2010/02/09)\r\n\r\n -- 2008/03/31: Version 1 release ----------------------------------------------------------------\r\n\r\n 2008/03/26: - added this help\r\n\r\n 2008/03/25: - projecting the gradient after normalization seems to\r\n improve conditioning and accelerate convergence in the\r\n extraction of the last sources\r\n\r\n 2008/03/24: - created by Vicente Zarzoso\r\n (University of Nice - Sophia Antipolis, France).\r\n \"\"\"\r\n\r\n # verbose = 0\r\n L, T = np.shape(X)\r\n\r\n mu_opt = 0 # default optimal step-size value\r\n norm_g = 0 # initialize gradient norm\r\n\r\n # Compute search direction (gradient vector)\r\n\r\n # compute necessary interim values\r\n y = w.conj().T * X\r\n\r\n ya2 = y * y.conj()\r\n y2 = y * y\r\n ya4 = ya2 * ya2\r\n\r\n Eya2 = np.mean(ya2)\r\n Ey2 = np.mean(y2)\r\n Eya4 = np.mean(ya4)\r\n\r\n if abs(Eya2) < np.finfo(float).eps: # check for zero denominator\r\n if verbose:\r\n print('>>> OPT STEP SIZE: zero power\\n')\r\n g = np.zeros(L)\r\n norm_g = 0\r\n return(g, mu_opt, norm_g)\r\n\r\n # compute gradient if contrast denominator is not null\r\n Eycx = X*y.conj().T/T\r\n Eyx = X*y.T/T\r\n Ey3x = X*(ya2*y).conj().T/T\r\n\r\n # contrast numerator and denominator at current point\r\n p1 = Eya4 - abs(Ey2)^2\r\n p2 = Eya2\r\n\r\n g = 4.*( (Ey3x - Eyx*Ey2.conj().T)*p2 - p1*Eycx )/p2**3.;\r\n\r\n g = P*g # project if required (normalize later)\r\n\r\n norm_g = np.linalg.norm(g)\r\n\r\n if norm_g < np.finfo(float).eps: # check for zero\r\n if verbose:\r\n print('>>> OPT STEP SIZE: zero gradient\\n')\r\n return(g, mu_opt, norm_g)\r\n\r\n # keep only real part if real-valued extracting vectors are required\r\n if wreal:\r\n g = np.real(g)\r\n\r\n # normalize the gradient -> parameter of interest: direction improves\r\n # conditioning of opt step-size polynomial\r\n g = g / norm_g\r\n\r\n # Compute optimal step size\r\n gg = g.conj().T * X\r\n\r\n # calculate interim values for contrast rational function\r\n ya2 = y * y.conj()\r\n ga2 = gg * gg.conj()\r\n reygc = (y * gg.conj()).real\r\n g2 = gg * gg\r\n yg = y * gg\r\n\r\n Eya2reygc = np.mean(ya2*reygc)\r\n Ereygc2 = np.mean(reygc**2.)\r\n Ega2reygc = np.mean(ga2*reygc)\r\n Ega4 = np.mean(ga2**2.)\r\n Eya2ga2 = np.mean(ya2*ga2)\r\n Ega2 = np.mean(ga2)\r\n Ereygc = np.mean(reygc)\r\n Eg2 = np.mean(g2)\r\n Eyg = np.mean(yg)\r\n\r\n h0 = Eya4 - abs(Ey2)**2\r\n h1 = 4*Eya2reygc - 4*real(Ey2*Eyg.conj().T)\r\n h2 = 4*Ereygc2 + 2*Eya2ga2 - 4*abs(Eyg)^2 - 2*real(Ey2*Eg2.conj().T)\r\n h3 = 4*Ega2reygc - 4*real(Eg2*Eyg.conj().T)\r\n h4 = Ega4 - abs(Eg2)^2\r\n\r\n P = [h4, h3, h2, h1, h0]\r\n\r\n i0 = Eya2\r\n i1 = 2*Ereygc\r\n i2 = Ega2\r\n\r\n Q = [i2, i1, i0]\r\n\r\n # normalized kurtosis contrast = P/Q^2 - 2\r\n\r\n a0 = -2*h0*i1 + h1*i0\r\n a1 = -4*h0*i2 - h1*i1 + 2*h2*i0\r\n a2 = -3*h1*i2 + 3*h3*i0\r\n a3 = -2*h2*i2 + h3*i1 + 4*h4*i0\r\n a4 = -h3*i2 + 2*h4*i1\r\n\r\n p = [a4, a3, a2, a1, a0]\r\n\r\n # normalized kurtosis contrast derivative = p/Q^3\r\n # ALTERNATIVE METHOD to compute optimal step-size polynomial oefficients\r\n #\r\n # # obtain contrast-function polynomials\r\n #\r\n # p11 = [Ega4, 4*Ega2reygc, 4*Ereygc2+2*Eya2ga2, 4*Eya2reygc, Eya4];\r\n # p13 = [Eg2, 2*Eyg, Ey2];\r\n # P = p11 - conv(p13, conj(p13)); # numerator\r\n # Q = [Ega2, 2*Ereygc, Eya2]; # square-root of denominator\r\n #\r\n # # compute derivatives\r\n # Pd = [4, 3, 2, 1].*P(1:4);\r\n # Qd = [2, 1].*Q(1:2);\r\n #\r\n # # contrast derivative numerator\r\n # p = conv(Pd, Q) - 2*conv(Qd, P);\r\n\r\n rr = np.roots(p).real # keep real parts only\r\n\r\n Pval = np.polyval(P, rr)\r\n Q2val = np.polyval(Q, rr)**2\r\n\r\n # check roots not shared by denominator\r\n nonzero_Q2val = np.where(Q2val > np.finfo(float).eps)[0]\r\n # Note: in theory, the denominator can never cancel out if the gradient is\r\n # used as a search direction, due to the orthogonality between the\r\n # extracting vector and the corresponding gradient (only exception: if it\r\n # is the last source to be extracted; but this scenario is detected by the\r\n # gradient norm)\r\n\r\n if len(nonzero_Q2val) == 0:\r\n if verbose:\r\n print('>>> OPT STEP SIZE: all roots shared by denominator\\n')\r\n print('Pval = ')\r\n print(Pval.conj().T)\r\n print('\\nQ2val = ')\r\n print(Q2val.conj().T)\r\n print('\\np = ')\r\n print(p)\r\n print('\\nP = ')\r\n print(P)\r\n print('\\nQ = ')\r\n print(Q)\r\n Q2 = np.convolve(Q, Q)\r\n P_Q2 = P/Q2\r\n print('\\nP_Q2 = ')\r\n print(P_Q2)\r\n print('\\n')\r\n return(g, mu_opt, norm_g)\r\n\r\n Pval = Pval[nonzero_Q2val]\r\n Q2val = Q2val[nonzero_Q2val]\r\n rr = rr[nonzero_Q2val]\r\n\r\n Jkm_val = Pval / Q2val - 2. # normalized kurtosis\r\n\r\n if s:\r\n Jkm_val = (s*Jkm_val).real # maximize or minimize kurtosis value,\r\n # depending on kurtosis sign\r\n else:\r\n Jkm_val = abs(Jkm_val) # maximize absolute kurtosis value,\r\n # if no sign is given\r\n im = np.argmax(Jkm_val)\r\n mu_opt = rr[im] # optimal step size\r\n\r\n return(g, mu_opt, norm_g)\r\n\r\n\r\ndef deflation_regression(X, s, dimred):\r\n \"\"\" Performs deflation by subtracting the estimated source contribution to\r\n the observations as:\r\n\r\n X' = X - h*s\r\n\r\n The source direction h is estimated via the least squares solution to the\r\n linear regression problem:\r\n\r\n h_opt = arg min_h ||X - h*s||^2 = X*s'/(s*s').\r\n\r\n SYNTAX:\r\n Xn = deflation_regression(X, s, dimred)\r\n\r\n\r\n INPUTS:\r\n X : observed data (one signal per row, one sample per column)\r\n\r\n s : estimated source (row vector with one sample per column)\r\n\r\n dimred : perform dimensionality reduction if parameter different\r\n from zero.\r\n\r\n OUTPUT:\r\n Xn : observed data after subtraction (one signal per row,\r\n one sample per column).\r\n\r\n Please, report any bugs, comments or suggestions to\r\n <a href = \"mailto:[email protected]\">zarzoso(a)i3s.unice.fr</a>.\r\n\r\n\r\n HISTORY:\r\n\r\n <modification date>: - <modification details>\r\n\r\n -- 2014/11/21: Version 3 release ------------------------------------------\r\n\r\n -- 2010/02/16: Version 2 release ------------------------------------------\r\n\r\n -- 2008/03/31: Version 1 release ----------------------------------------\r\n\r\n 2008/03/26: - added this help\r\n\r\n 2008/03/18: - if required, perform dimensionality reduction via QR\r\n decomposition\r\n\r\n 2008/03/13: - created by Vicente Zarzoso\r\n (I3S Laboratory, University of Nice Sophia Antipolis,\r\n CNRS, France).\r\n \"\"\"\r\n s2 = s * s.conj().T # extracted source power times sample size\r\n\r\n if abs(s2) < np.finfo(float).eps:\r\n # don't perform subtraction if estimated component is null\r\n return(X)\r\n\r\n h = X * s.conj().T / s2 # source direction estimated via least squares\r\n\r\n if dimred:\r\n # with dimensionality reduction (old version) ***********************\r\n n = len(h)\r\n Q = np.concatenate((h, np.eye(n, n-1), axis=0)\r\n Q, R = np.linalg.qr(Q)\r\n Q = Q[:,1:n] # orthonormal basis of orhogonal subspace of h\r\n X = Q.conj().T * X #remaining contribution with dimensionality reduction\r\n else:\r\n # without dimensionality reduction\r\n X = X - h*s\r\n\r\n # if dimred ### an alternative version? *** TO BE TESTED ***\r\n # [n, T] = size(X)\r\n # [V, S, U] = svd(X', 0) #'economy' SVD\r\n # hU = abs(h'*U)\r\n # diagS = diag(S)\r\n # X = sqrt(T)*V(:, 1:n-1)'\r\n # pause\r\n # end # if dimred\r\n\r\n return(X)\r\n\r\ndef robsutica(X, deftype='orthogonalization', dimred=False, kurtsign=0.,\r\n maxiter=1000, prewhi=True, tol=1E-3, verbose=False, wini=None,\r\n wreal=False):\r\n \"\"\" Kurtosis-based RobustICA method for deflationary ICA/BSS\r\n (see references below for details).\r\n\r\n SYNTAX:\r\n S, H, niter, W = robustica(X, **kwargs)\r\n\r\n INPUTS:\r\n X : observed signals (one row per signal, one column per\r\n sample)\r\n\r\n deftype: deflation type: 'orthogonalization', 'regression'\r\n * default: 'orthogonalization'\r\n\r\n dimred: dimensionality reduction in regression if parameter\r\n different from zero; (not used in deflationary\r\n orthogonalization)\r\n * default: false\r\n\r\n kurtsign: source kurtosis signs (one element per source);\r\n maximize absolute normalized kurtosis if corresponding\r\n element = 0;\r\n * default: zero vector (maximize absolute normalized\r\n kurtosis for all sources)\r\n\r\n maxiter: maximum number of iterations per extracted source;\r\n * default: 1000\r\n\r\n prewhi: prewhitening (via SVD of the observed data matrix);\r\n * default: true\r\n\r\n tol: threshold for statistically-significant termination\r\n test of the type\r\n ||wn - p*w||/||w|| < tol/sqrt(sample size);\r\n (up to a phase shift p)\r\n termination is also tested by comparing the gradient\r\n norm according to:\r\n ||g|| < tol/sqrt(sample size);\r\n termination test is not used if tol < 0, so that the\r\n algorithm runs the maximu number of iterations\r\n (except if optimal step size reaches a null value)\r\n * default: 1E-3\r\n\r\n verbose: verbose operation if true\r\n * default: False (quiet operation).\r\n\r\n wini: extracting vectors initialization for RobustICA\r\n iterative search; if empty, identity matrix of suitable\r\n dimensions is used\r\n * default: None\r\n\r\n wreal: if different from zero, keep extracting vector real\r\n valued by retaining only the real part of the gradient;\r\n useful, for instance, in separating real-valued mixtures\r\n in the frequency domain, as in the RobustICA-f algorithm\r\n * default: False.\r\n\r\n OUTPUTS:\r\n S : estimated sources signals (one row per signal,\r\n one column per sample)\r\n\r\n H : estimated mixing matrix\r\n\r\n niter : number of iterations (one element per extracted source)\r\n\r\n W : estimated extracting vectors\r\n (acting on whitened observations if prewhitened is\r\n required; otherwise, acting on given observations).\r\n\r\n EXAMPLES:\r\n\r\n >> S = robustica(X, **kwargs);\r\n\r\n - RobustICA with prewhitening, deflationary orthogonalization, identity\r\n matrix initialization, up to 1000 iteratons per source, termination\r\n threshold 1E-3/(sample size), without aiming at any specific source\r\n (default)\r\n\r\n S = robustica(X)\r\n\r\n - RobustICA with prewhitening and regression-based deflation:\r\n\r\n ... deftype='regression' ...\r\n\r\n - RobustICA without prewhitening, with regression-based deflation:\r\n\r\n ... deftype='regression', prewhi=False ...\r\n\r\n - RobustICA without prewhitening, with regression-based deflation and\r\n random initialization:\r\n\r\n ... deftype='regression', prewhi=False, \\\r\n wini=np.randn(np.shape(X)[0]) ....\r\n\r\n - RobustICA without prewhitening, with regression-based deflation and\r\n dimensionality reduction:\r\n\r\n ... deftype='regression', dimred=True, prewhi=False ...\r\n\r\n - RobustICA with prewhitening, regression-based deflation, verbose operation:\r\n\r\n ... deftype='regression', verbose=True ...\r\n\r\n - RobustICA with prewhitening, deflationary orthogonalization, and exactly\r\n 10 iterations per independent component:\r\n\r\n ... tol=-1, maxiter=10 ...\r\n\r\n - RobustICA with prewhitening, deflationary orthogonalization, targeting\r\n first the sub-Gaussian and then the super-Gaussian sources in a square\r\n mixture of 5 sub-Gaussian and 5 super-Gaussian sources:\r\n\r\n ... kurtsign=[np.ones(1,5), -np.ones(1,5)] ...\r\n\r\n - RobustICA with prewhitening, regression-based deflation, targeting first\r\n a sub-Gaussian source:\r\n\r\n ... deftype=regression', \\\r\n kurtsign=np.insert(np.zeros(np.shape(X)[0]-1), 0, -1) ...\r\n\r\n\r\n REFERENCES:\r\n\r\n - V. Zarzoso and P. Comon, <a href = \"http://www.i3s.unice.fr/~zarzoso/biblio/tnn10.pdf\">\"Robust Independent Component Analysis by Iterative Maximization</a>\r\n <a href = \"http://www.i3s.unice.fr/~zarzoso/biblio/tnn10.pdf\">of the Kurtosis Contrast with Algebraic Optimal Step Size\"</a>,\r\n IEEE Transactions on Neural Networks, vol. 21, no. 2, pp. 248-261,\r\n Feb. 2010.\r\n\r\n - V. Zarzoso and P. Comon, <a href = \"http://www.i3s.unice.fr/~zarzoso/biblio/ica07.pdf\">\"Comparative Speed Analysis of FastICA\"</a>,\r\n in: Proceedings ICA-2007, 7th International Conference on Independent Component Analysis\r\n and Signal Separation, London, UK, September 9-12, 2007, pp. 293-300.\r\n\r\n - V. Zarzoso, P. Comon and M. Kallel, <a href = \"http://www.i3s.unice.fr/~zarzoso/biblio/eusipco06.pdf\">\"How Fast is FastICA?\"</a>,\r\n in: Proceedings EUSIPCO-2006, XIV European Signal Processing Conference,\r\n Florence, Italy, September 4-8, 2006.\r\n\r\n\r\n Please, report any bugs, comments or suggestions to\r\n <a href = \"mailto:[email protected]\">zarzoso(a)i3s.unice.fr</a>.\r\n\r\n\r\n HISTORY:\r\n\r\n <modification date>: - <modification details>\r\n\r\n -- 2014/11/21: Version 3 release ------------------------------------------\r\n\r\n 2014/06/25: - added 'wreal' input parameter to allow the separation of\r\n real-valued mixtures in a complex domain (e.g., after\r\n Fourier transform)\r\n - simplified calling syntax using cell-array input argument\r\n (irrelevant for Python)\r\n -- 2010/02/16: Version 2 release ------------------------------------------\r\n\r\n 2010/02/09: - added termination test based on gradient norm\r\n\r\n 2009/03/02: - project extracting vector before normalization\r\n\r\n 2009/02/02: - variable 'thmu' (for step-size based termination test)\r\n removed, as it was not used\r\n\r\n -- 2008/03/31: Version 1 release ------------------------------------------\r\n\r\n 2008/12/03: - modified help info about output parameter W\r\n (extracting vectors act on whitened observation if\r\n prewhitening is required)\r\n\r\n 2008/03/26: - added this help\r\n\r\n 2008/03/13: - created by Vicente Zarzoso\r\n (I3S Laboratory, University of Nice Sophia Antipolis,\r\n CNRS, France).\r\n \"\"\"\r\n\r\n # record size of the input signal\r\n n, T = np.shape(X)\r\n\r\n # remove mean from each column\r\n X = X - np.mean(X, axis=1)[:, np.newaxis]\r\n\r\n # prewhitening if prewhi=True\r\n if prewhi:\r\n if verbose:\r\n print(\">>> Prewhitening\\n\")\r\n\r\n V, D, U = np.linalg.svd(X.conj().T, 0) # economy SVD of data matrix\r\n B = U * D / np.sqrt(T) # PCA mixing-matrix estimate\r\n Z = np.sqrt(T) * V.conj().T # PCA source estimate\r\n else:\r\n Z = X\r\n\r\n # RobustICA algorithm\r\n dimobs = n # number of remaining observationd (may chnage under\r\n # dimensionality reduction)\r\n W = np.zeros(n) # extracting vectors\r\n I = np.eye(n)\r\n P = I # projection matrix for deflationary orthogonalization (if required)\r\n\r\n tol = tol / np.sqrt(T) # a statistically-significant termination threshold\r\n tol2 = np.sign(tol) * tol**2 / 2 # the same threshold in terms of\r\n # extracting vectors's absolute scaler product\r\n niter = np.zeros(n) # number of iterations\r\n\r\n if deftype == 'regression':\r\n do_reg = True\r\n type_text = 'regression-based deflation'\r\n if dimred: # only used in regression mode\r\n type_text += ' and dimensionality reduction'\r\n else: # default\r\n type_text += ', no dimensionality reduction'\r\n\r\n else:\r\n do_reg = False\r\n type_text = 'deflationary orthogonalization'\r\n\r\n if verbose:\r\n print('\\n>>>RobustICA with %s\\n', type_text)\r\n\r\n # iterate over all sources\r\n for k in range(n):\r\n if verbose:\r\n print('> source # %d :\\n', k)\r\n\r\n it = 0\r\n keep_going = True\r\n\r\n w = wini[:, k] # initialization\r\n\r\n # keep only required number of components\r\n if do_reg:\r\n w = w[(n-dimobs):n]\r\n\r\n w = w / np.linalg.norm(w) # normalization\r\n w = P * w # project onto extracted vectors' orthogonal subspace\r\n # (if deflationary orthogonalization)\r\n\r\n signkurt = kurtsign[k] # kurtosis sign of next source to be estimated\r\n\r\n # iterate to extract one source\r\n while keep_going:\r\n it += 1\r\n\r\n # compute KM optimal step size for gradient descent\r\n g, mu_opt, norm_g = kurt_gradient_optstep(w, Z, signkurt, P, wreal,\r\n verbose=verbose)\r\n\r\n # update extracting vector and project if required\r\n wn = P * (w + mu_opt * g)\r\n\r\n wn = wn / np.linalg.norm(wn) # normalize\r\n\r\n # extracting vector convergence test\r\n th = np.abs(1. - np.abs(wn.conj().T*w))\r\n\r\n w = wn\r\n\r\n if th < tol2 or norm_g < tol or it>maxiter or mu_opt == 0:\r\n # finish when extracting vector converges, the gradient is too\r\n # small, too many iterations have been run, or the optimal\r\n # step-size is zero\r\n keep_going = False\r\n # end while keep_going\r\n\r\n if do_reg:\r\n # zero-padding to account for dimensionality reduction in regression\r\n W[:, k] = np.concatenate((w, np.zeros(n-dimobs, 1)), axis=0)\r\n else:\r\n W[:, k] = w # estimated extracting vector\r\n\r\n s = w.conj().T * Z # estimated source\r\n S[k, :] = s\r\n niter[k] = it # number of iterations\r\n\r\n if verbose:\r\n print('%d iterations\\n', it)\r\n\r\n if do_reg:\r\n Z = deflation_regression(Z, s, dimred) # regression + subtraction\r\n dimobs = np.shape(Z)[0] # recompute observation dimension, just in\r\n # case it has changed during deflation\r\n P = np.eyeO(dimobs) # P is not required, but its dimensions should\r\n # decrease according to Z\r\n else:\r\n # projection matrix for orthogonalization (if required)\r\n P = I - W * W.conj().T\r\n # end for k\r\n\r\n if verbose:\r\n print('\\nTotal number of iterations: %d\\n', np.sum(niter))\r\n\r\n # Mixing matrix estimation\r\n H = X * S.conj().T * np.linalg.pinv(S * S.conj().T) # LS estimate\r\n\r\n return(S, H, niter, W)\r\n\r\n\r\nif __name__ == '__main__':\r\n # test\r\n pass\r\n"
},
{
"alpha_fraction": 0.452136754989624,
"alphanum_fraction": 0.4692307710647583,
"avg_line_length": 34.4375,
"blob_id": "75419fe3d667cbf3572cec4e6bdffc473d71da04",
"content_id": "48481cb7002f609c8a54427913fc0162350a4b2d",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 1170,
"license_type": "no_license",
"max_line_length": 95,
"num_lines": 32,
"path": "/Plots/archive/helpsortcolor.py",
"repo_name": "sapphire008/Python",
"src_encoding": "UTF-8",
"text": "# -*- coding: utf-8 -*-\r\n\"\"\"\r\nCreated on Fri Jul 03 18:02:13 2015\r\n\r\n@author: Edward\r\n\"\"\"\r\nDEBUG=True\r\nimport re\r\ntxtdir = 'C:/Users/Edward/Documents/Assignments/Scripts/Python/Plots/resource/colorbrewer2.txt'\r\n\r\ndef readColorBrewers(txtdir, lineexp=r'(\\w+)(?:,|$)\\[([^\\]]+)\\](?:,|$)',\r\n rgbexp = r'(\\d+),(\\d+),(\\d+)'):\r\n \"\"\"read color brewers text file\"\"\"\r\n #colors={}\r\n with open(txtdir, 'r') as fid:\r\n for line in fid:\r\n if not line.strip() or line[0] == \"#\":\r\n continue # skip comments or empty line\r\n if line.strip() == \"@Stop\":\r\n break\r\n name, rgb = re.findall(lineexp, line)[0]\r\n # parse color list\r\n rgb_list = re.findall(rgbexp, rgb)\r\n # change into integer\r\n rgb_list = [tuple([int(r) for r in s]) for s in rgb_list]\r\n # print\r\n rgbstr = \"\"\r\n for n, rgb in enumerate(rgb_list):\r\n rgbstr += \"(%d, (%d,%d,%d)),\" %(n, rgb[0],rgb[1],rgb[2])\r\n print(\"'%s' : OrderedDict([%s])\" % (name, rgbstr[:-1]))\r\n # print(\"'%s',\"% (name)),\r\n fid.close()\r\n "
},
{
"alpha_fraction": 0.6624134182929993,
"alphanum_fraction": 0.6999635696411133,
"avg_line_length": 36.57534408569336,
"blob_id": "ff8e3191dc3f39af1af0de25ac7de8e11c599d80",
"content_id": "6b0ef622f9133650ae9f80f8ace28f75628e1f27",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Shell",
"length_bytes": 2743,
"license_type": "no_license",
"max_line_length": 94,
"num_lines": 73,
"path": "/EMCNA/batch_EM_Clonal_Abundance.sh",
"repo_name": "sapphire008/Python",
"src_encoding": "UTF-8",
"text": "#! /bin/bash\n\n# Iterate / Call ExtractSNP.py\nbam_dir=\"/mnt/NFS/homeG2/LaFramboiseLab/dxc430/Software/EM_Clonal_Abundance/\"\nresult_dir=\"/mnt/NFS/homeG2/LaFramboiseLab/dxc430/Software/EM_Clonal_Abundance/\"\nPATH_TO_HG19REF=\"/mnt/NFS/homeG2/LaFramboiseLab/dxc430/Software/EM_Clonal_Abundance/hs37d5.fa\"\nPATH_TO_SAMTOOLS=\"/mnt/NFS/homeG2/LaFramboiseLab/dxc430/Software/samtools-1.1/samtools\"\nPATH_TO_BCFTOOLS=\"/mnt/NFS/homeG2/LaFramboiseLab/dxc430/Software/bcftools-1.1/bcftools\"\nPATH_TO_SNPSIFT=\"/mnt/NFS/homeG2/LaFramboiseLab/dxc430/Software/snpEff/SnpSift.jar\"\nmutation=\"DEL\" # type of mutation, UPD or DEL (monosomy)\nchr2use=\"5\" # chromosome to use\nposition_range=\"137528564,153809148\" # SNP position range \"144707068,148942012\"\nmin_read=\"\" # any total reads below this number is filtered out, 10\n\n# Assuming these files are sorted.\ntum_bam_list=(\"D110RACXX_3_1ss_.sam.bam_sorted_only_mapped.bam\") # separated by space\nnorm_bam_list=(\"D110RACXX_4_1ss_.sam.bam_sorted_only_mapped.bam\") # separated by space\nsample_list=(\"D110RACXX\") # separated by space\n\n# echo ${#tum_bam_list[@]}\n# echo ${#norm_bam_list[@]}\n# echo ${#sample_list[@]}\n# exit 0\n\n# Find script's own path; export some global variables;\nPATH_INSTALL=\"$(dirname $(readlink -f ${BASH_SOURCE[0]}))\"\nexport PATH_TO_HG19REF\nexport PATH_TO_SAMTOOLS\nexport PATH_TO_BCFTOOLS\nexport PATH_TO_SNPSIFT\n\n# echo $PATH_TO_HG19REF\n# echo $PATH_TO_SAMTOOLS\n# echo $PATH_TO_BCFTOOLS\n# echo $PATH_TO_SNPSIFT\n# exit 0\n\n# Record list of files to be used in EM\nshared_snp_count_list=\"\"\n\n# Parse options for ExtractSNP\nif [ -n \"$chr2use\" ]; then chr2use=\" -r \"${chr2use}; fi\n\n# Call SNPs\nfor i in \"${!sample_list[@]}\"; do\n\tnorm=${bam_dir}/${norm_bam_list[$i]}\n\ttum=${bam_dir}/${tum_bam_list[$i]}\n\trst=${result_dir}/${sample_list[$i]}/\n\n\tshared_snp_count_list+=\"${rst}/shared_snp_count.txt \"\n\n\t#Before run, check if this sample is already processed\n\tif [ -e ${rst}/shared_snp_count.txt ]; then\n\t\tcontinue\n\tfi\n\n\t# Call\n\techo \"python ${PATH_INSTALL}/ExtractSNP.py $chr2use ${PATH_TO_HG19REF} ${norm} ${tum} ${rst}\"\n\tpython ${PATH_INSTALL}/ExtractSNP.py $chr2use ${PATH_TO_HG19REF} ${norm} ${tum} ${rst}\ndone\n\n# Parse Options for EM\nif [ -n \"$mutation\" ]; then mutation=\"-m \"${mutation}; fi\nif [ -n \"$position_range\" ]; then position_range=\" -p \"${position_range}; fi\nif [ -n \"$min_read\" ]; then min_read=\" -N \"${min_read}; fi\noutput=\" -o \"${result_dir}\"/theta_results\"`date +'_%Y.%m.%d.%H.%M.%S'`\".tsv\"\nsubject_id=\" -i \"$(IFS=$','; echo \"${sample_list[*]}\")\noptions=${mutation}${chr2use}${position_range}${min_read}${subject_id}${output}\n\n\n# Estimate abundance with EM\necho \"python ${PATH_INSTALL}/EM.py ${options} ${shared_snp_count_list}\"\npython ${PATH_INSTALL}/EM.py ${options} ${shared_snp_count_list}\n"
},
{
"alpha_fraction": 0.5980024933815002,
"alphanum_fraction": 0.617977499961853,
"avg_line_length": 26.294116973876953,
"blob_id": "b874e8fbf6c9bd36ecff0b75e4780133517d1971",
"content_id": "c8f54df62a99d15c8f39723e48a821135f419e9b",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 2403,
"license_type": "no_license",
"max_line_length": 111,
"num_lines": 85,
"path": "/python_tutorials/ThinkPython/practice_notes_2.py",
"repo_name": "sapphire008/Python",
"src_encoding": "UTF-8",
"text": "# Python 3.3.0 Practice Notes\r\n# Day 2: November 24, 2012\r\nimport math;\r\n\r\n# Conditional statements\r\nx=1;\r\ny=2;\r\nz=3;\r\nif x<y and z<y: #again, don't forget the colon\r\n print(\"Y is teh biggest!\");\r\nelif x<y or z<y:\r\n print(\"Let's do nothing!\");\r\nelse:\r\n print(\"Okay, I am wrong\");\r\n #again, additional empty line for ending this stub\r\n\r\n# Recursion: a recursive function is a function that calls itself\r\ndef countdown(n):\r\n if n<=0:\r\n print(\"Balst Off!\");\r\n else:\r\n print(n);\r\n countdown(n-1);\r\n\r\ncountdown(3)\r\n#The function itself is like a while loop:\r\n#As long as the else statement is executed (by calling itself),\r\n#the loop continues until that the function no longer calls itself\r\n#The output looks like this:\r\n#>>>3\r\n#>>>2\r\n#>>>1\r\n#>>>Blast Off!\r\n\r\n# User Prompt\r\nask_question = input(\"Do you like Python? [Y/N]:\\n\");#asking user to type in something, str, int, float, etc...\r\nif ask_question==\"Y\":\r\n print(\"Me, too\");\r\nelse:\r\n ask_another_question = input(\"Why not?\\n\");\r\n print(\"Oh, okay, I see.\");\r\n\r\n#Note: % or some other symbol in the Python Shell prompts user input\r\n\r\n# Non-void functions\r\nabs(-3.323);#returns the absolute value of the input number\r\n\r\n# Iteration\r\n#for loop\r\nfor i in range(0,4,1):\r\n print(\"Hello, World!\");\r\n\r\n#range(start, stop, step), default_start=0, default_step=1\r\n#all number must be integers\r\n#the first number of the array built by range will be start\r\n#the last number of the array built by range will be (stop-step)\r\n \r\n#The following example iss from: http://en.wikibooks.org/wiki/Non-Programmer's_Tutorial_for_Python_3/For_Loops\r\ndemolist = ['life',42,'the universe',6,'and',7,'everthing'];\r\nfor item in demolist:\r\n print(\"The Current item is:\",item);\r\n \r\n#The output is like this:\r\n#The Current item is: life\r\n#The Current item is: 42\r\n#The Current item is: the universe\r\n#The Current item is: 6\r\n#The Current item is: and\r\n#The Current item is: 7\r\n#The Current item is: everything\r\n\r\n#while loop\r\ndef sequence(n):\r\n while n!=1: #while n is NOT equal to 1\r\n if n>1000:\r\n print(\"This number is too large\");\r\n break; #terminate the execution of the function\r\n elif n%2 == 0:\r\n print(n);\r\n n = n//2;\r\n else:\r\n print(n);\r\n n = n*3+1;\r\n \r\n# This concludes today's study."
},
{
"alpha_fraction": 0.6302502155303955,
"alphanum_fraction": 0.659070611000061,
"avg_line_length": 27.69871711730957,
"blob_id": "b7f77ff0400cc074d6213d8d804a6a0a35d8cd7a",
"content_id": "be4a055c5923f59c7a91daddd21b18e59ec4f65e",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 4476,
"license_type": "no_license",
"max_line_length": 80,
"num_lines": 156,
"path": "/python_tutorials/practice_notes_5.py",
"repo_name": "sapphire008/Python",
"src_encoding": "UTF-8",
"text": "# Python 3.3.0 Practice Notes\n# Day 5: December 25, 2012\n\n# A tuple is a sequence of values.\n# Use parenthesis for tuples, though it is not necessary.\nt1 = ('a','b','c','d','e','f');\n#to make a tuple with a single element, parenthesis does not work. Use comma.\nt2 = 'a',; #<class 'tuple'>\nt3 = ('a'); #<class 'str'>\n#use tuple() to create a tuple (empty or not).\nt4 = tuple();#empty tuple\nt5 = tuple('lupins');#tuples of each element of the string\nprint(t5);\n#>>>('l', 'u', 'p', 'i', 'n', 's')\n#use bracket to index tuple\nt5[3];\n#>>>'i'\n# In contrast to list, tuples are immutable\nt5[3]='A';\n#>>>TypeError: 'tuple' object does not support item assignment\n#we can reassign the tuple with a new tuple\nt5=t5[:3]+('A',)+t5[4:];\nprint(t5);\n#>>>('l', 'u', 'p', 'A', 'n', 's')\n\n# Tuple Assignments\nemail_addr='[email protected]';\nuName,domName=email_addr.split('@');#splitting the string at '@'\nprint(uName);\n#>>>monty\nprint(domName);\n#>>>python.org\n\n# Tuple as return values\nt=divmod(7,3);\nprint(t);\n#>>>(2,1) #(quotient, remainder)\n#we may also do\nquot,remd=divmod(7,3);\nprint(quot);\n#>>>2\nprint(remd);\n#>>>1\n# An example function that returns tuple\ndef min_max(t):\n return min(t),max(t);\n\nt=(1,2,3,4,5,6,7,8);\nt_min,t_max=min_max(t);\nprint(t_min);\n#>>>1\nprint(t_max);\n#>>>8\n\n#'*' in front of the parameter: gather or scatter\n#gather: takes arbitrarily many arguments and do commands with all of them\ndef printAll(*arg):\n print arg; #print every single input arguments\n\n#scatter: given one argument (e.g. tuple), separate them to fit what the command\n#requires\nt=(7,3);\ndivmod(t);\n#>>>TypeError: divmod expected 2 arguments, got 1\ndivmod(*t);\n#>>>(2,1)\n\ndef sumAll(*args): #this should gather all the args into a tuple\n return sum(args); #sums a tuple\n\nsumAll(2,3,4,5,6,2,3,4,1);\n#>>>30\n\n# List and tuples\n#zip() combines multiple sequences into a list of tuples\ns='abc';\nt=[0,1,2,3,4];\nz=zip(s,t);#note the returned list has length of the shorter sequence\nprint(z);\n#supposedly, it looks like the following, but Python 3 does not print like this\n#[('a',0),('b',1),('c',2)] -->Python2\n#<zip object at 0x0000000002FBC5C8> -->Python3\nfor letter,number in z:\n print(letter,number);\n\n#>>>\n#a 0\n#b 1\n#c 2\n\n#to transverse the elements and indices a sequence, use enumerate()\nfor index, element in enumerate('abc'):\n print(index,element);\n#>>>\n#0 a\n#1 b\n#2 c\n\n# Dictionaries and tuples\n#.items() method of dictionaries returns a list of tuples, where each element\n#of the tuple is a (key,value) pair\nd={'a':1,'b':2,'c':3,'d':4};\nt=d.items();\nprint(t);\n#>>>dict_items([('d', 4), ('b', 2), ('c', 3), ('a', 1)])\n#in fact, this 'dict_items' is called a iterator, but it behaves like a list,\n#and we may convert this into a list by doing list(d.items())\n\n#create a dictionary of (string,index)\nd=dict(zip('asdfgh',range(len('asdfgh'))));\nprint(d);\n#>>>{'h': 5, 'f': 3, 'g': 4, 'd': 2, 's': 1, 'a': 0}\n\n#.update() method of dictionary adds a list of tuples to the dictionary\nd.update([('z',7),('m',9)]);\n\n#use tuples as keys of a dictionary\nd.clear();#clear all the items in the dictionary\nlastName=['Smith','Wang','Lee','Allen','Georgeton','Schuman'];\nfirstName=['John','Julie','Thomas','Nich','Busk','Henry'];\nphoneNum=['626','232','888','333','123','999'];\n\nd=dict();\nfor i in range(0,len(lastName)):\n d[lastName[i],firstName[i]]=phoneNum[i];\n \n# Tuple comparison\n#tuple compares the first elements of each tuple, if tie, go to the next one\n#sorting words from shortes to the longest\ndef sort_by_length(words_list):\n l=list();#empty list for the sorted words\n for word in words_list:\n l.append((len(word),word));\n \n l.sort(reverse=True);#'reverse=True' make sure sorting in descending order\n sorted_list=[];\n for wl,wd in l:\n sorted_list.append(wd);\n \n return sorted_list;\n\nword_list=['adds','vista','banana','fda','joke'];\nafter_sort=sort_by_length(word_list);\nprint(after_sort);\n#>>>['banana', 'vista', 'joke', 'adds', 'fda']\n#note that 'joke' and 'adds' have the same length. It will sort by the second\n#element of the tuple, which are the words. Since 'j' comes after 'a', and\n#we specified to sort by descending order, 'joke' comes before 'adds'\n\n# When to use tuple\n#1) when trying to return a list of parameters in a function\n#2) when required using an immutable sequence, for instance, creating the key\n#of a dictionary (can also use strings)\n#3) when passing a sequence to a function to avoid aliasing\n\n#This concludes today's study."
},
{
"alpha_fraction": 0.6357738375663757,
"alphanum_fraction": 0.6607970595359802,
"avg_line_length": 27.189189910888672,
"blob_id": "8522dca9c666ec31ba712cf8bf543bb8bd8c11e2",
"content_id": "62536f968021739bf047df6c7532597400d6cb97",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 2158,
"license_type": "no_license",
"max_line_length": 136,
"num_lines": 74,
"path": "/python_tutorials/PythonForDataAnalysis/Chapter_2_introductory.py",
"repo_name": "sapphire008/Python",
"src_encoding": "UTF-8",
"text": "# -*- coding: utf-8 -*-\r\n\"\"\"\r\nCreated on Mon Apr 28 01:34:26 2014\r\n\r\n@author: Edward\r\n\"\"\"\r\n\r\n# Working example\r\nimport json\r\npath = 'C:\\Users\\Edward\\Documents\\Assignments\\Python\\python_tutorials\\data_analysis_tutorial\\usagov_bitly_data2013-05-17-1368832207.txt'\r\nrecords = [json.loads(line) for line in open(path)]\r\n\r\n# Counting time zone in pure Python\r\ntime_zones = [rec['tz'] for rec in records if 'tz' in rec]\r\ndef get_counts(sequence):\r\n counts = {}\r\n for x in sequence:\r\n if x in counts:\r\n counts[x] +=1\r\n else:\r\n counts[x]=1\r\n return counts\r\n\r\nfrom collections import defaultdict\r\ndef get_counts2(sequence):\r\n counts = defaultdict(int)# values will initialize to 0\r\n for x in sequence:\r\n counts[x] +=1\r\n return counts\r\n \r\ncounts = get_counts(time_zones)\r\n\r\n# count top 10 time zones\r\ndef top_counts(count_dict,n=10):\r\n value_key_pairs=[(count,tz) for tz, count in count_dict.items()]\r\n value_key_pairs.sort()\r\n return value_key_pairs[-n:]\r\n \r\ntop_counts(counts)\r\n\r\nfrom collections import Counter\r\ncounts = Counter(time_zones)\r\ncounts.most_common(10)\r\n\r\n# Count time zones with Panda\r\nfrom pandas import DataFrame, Series\r\nframe = DataFrame(records)\r\ntz_counts = frame['tz'].value_counts()\r\n\r\nclean_tz = frame['tz'].fillna('Missing')\r\nclean_tz[clean_tz==''] = 'Unknown'\r\ntz_counts = clean_tz.value_counts()\r\n# plot\r\ntz_counts[:10].plot(kind='barh',rot=0)\r\n\r\nresults = Series([x.split()[0] for x in frame.a.dropna()])\r\nresults.value_counts()[:8]\r\n\r\n# split into Windows vs non-Windows users\r\nimport numpy as np\r\ncframe = frame[frame.a.notnull()]\r\noperating_system = np.where(cframe['a'].str.contains('Windows'),\r\n 'Windows','Not Windows')\r\nby_tz_os = cframe.groupby(['tz',operating_system])\r\nagg_counts = by_tz_os.size().unstack().fillna(0)\r\nindexer = agg_counts.sum(1).argsort()\r\ncount_subset = agg_counts.take(indexer)[-10:]\r\ncount_subset.plot(kind='barh',stacked=True)\r\n# normalized to 1 and plot again\r\nnormed_subset = count_subset.div(count_subset.sum(1),axis=0)\r\nnormed_subset.plot(kind='barh',stacked=True)\r\n\r\n\r\n# This concludes today's study"
},
{
"alpha_fraction": 0.651260495185852,
"alphanum_fraction": 0.7289915680885315,
"avg_line_length": 29.799999237060547,
"blob_id": "906eddada89d15953808e68e8f78761d918e22bd",
"content_id": "df690fca898385a551f9d68b63ef4adbed5eb91b",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "INI",
"length_bytes": 952,
"license_type": "no_license",
"max_line_length": 104,
"num_lines": 30,
"path": "/PySynapse/resources/config0.ini",
"repo_name": "sapphire008/Python",
"src_encoding": "UTF-8",
"text": "# Startup\r\ncontinueFromLastSession = True # Not implemented\r\n# Export Parameters\r\nfigSizeW = 4 # inches\r\nfigSizeH = 2 # inches\r\nfigSizeWMulN = True\r\nfigSizeHMulN = True\r\nhSpaceType = Fixed\r\nhFixedSpace = 10 # fixed horizontal spacing between plots, in terms of percentage of total width\r\ndpi = 300 # for raster images\r\nlinewidth = 0.5669291338582677 # trace linewidth\r\nannotation = Label Only\r\nmonoStim = True\r\nshowInitVal = True\r\nfontName = Helvetica\r\nfontSize = 6\r\nannotfontSize = 2\r\nsaveDir = R:/temp.svg\r\ngridSpec = Vertically # only relevant for grid arrangement\r\nstimReflectCurrent = False\r\nscalebarAt = Last # only relevant for grid arrangement ['all', 'first','last','none']\r\ntimeRangeMin = auto\r\ntimeRangeMax = auto\r\nvoltRangeMin = -100\r\nvoltRangeMax = 50\r\ncurRangeMin = -300\r\ncurRangeMax = 300\r\nstimRangeMin = -1000\r\nstimRangeMax = 1000\r\ncolors = ['k','#ff7f0e','#2ca02c','#d62728','#9467bd','#8c564b','#e377c2','#7f7f7f','#bcbd22','#17becf']"
},
{
"alpha_fraction": 0.6732186675071716,
"alphanum_fraction": 0.6805896759033203,
"avg_line_length": 31.91666603088379,
"blob_id": "d0e117c2df46fbd1245628d40f931a0ea8faa6ee",
"content_id": "4d1ee431d06db0d821de410f7f07862f7f77db06",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 407,
"license_type": "no_license",
"max_line_length": 78,
"num_lines": 12,
"path": "/Pycftool/ElideQLabel.py",
"repo_name": "sapphire008/Python",
"src_encoding": "UTF-8",
"text": "from PyQt5.QtGui import QPainter, QFontMetrics\r\nfrom PyQt5.QtWidgets import QLabel\r\nfrom PyQt5.QtCore import Qt\r\n\r\nclass ElideQLabel(QLabel):\r\n def paintEvent( self, event ):\r\n painter = QPainter(self)\r\n\r\n metrics = QFontMetrics(self.font())\r\n elided = metrics.elidedText(self.text(), Qt.ElideRight, self.width())\r\n\r\n painter.drawText(self.rect(), self.alignment(), elided)\r\n"
},
{
"alpha_fraction": 0.7846153974533081,
"alphanum_fraction": 0.7846153974533081,
"avg_line_length": 21,
"blob_id": "05f15e9006045d8c6130733636eedba479457bdb",
"content_id": "03889bfbce8fe14bbf958f572e90fbf537051ad3",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 65,
"license_type": "no_license",
"max_line_length": 25,
"num_lines": 3,
"path": "/Spikes/spikedetekt2/spikedetekt2/core/__init__.py",
"repo_name": "sapphire008/Python",
"src_encoding": "UTF-8",
"text": "from progressbar import *\nfrom main import *\nfrom script import *"
},
{
"alpha_fraction": 0.6236459612846375,
"alphanum_fraction": 0.6578599810600281,
"avg_line_length": 48.751678466796875,
"blob_id": "dff7fb920870687028b3ac1b2bf3867b0fd917cf",
"content_id": "f7512a5c68802bd33da9e70acbc04ec12924e8c7",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 7570,
"license_type": "no_license",
"max_line_length": 126,
"num_lines": 149,
"path": "/fMRI_pipeline/granger_causality.py",
"repo_name": "sapphire008/Python",
"src_encoding": "UTF-8",
"text": "# -*- coding: utf-8 -*-\n\"\"\"\nCreated on Fri May 30 14:14:35 2014\n\n@author: dcui\n\"\"\"\n\n# Import some modules\nPYTHONPKGPATH = '/hsgs/projects/jhyoon1/pkg64/pythonpackages/'\n\n#from __future__ import print_function # Python 2/3 compatibility\nimport sys,os\nsys.path.append(os.path.join(PYTHONPKGPATH,'nibabel-1.30'))\n#import nibabel# required for nipy\nsys.path.append(os.path.join(PYTHONPKGPATH,'nitime'))\nimport numpy as np\nimport matplotlib.pyplot as plt\nfrom matplotlib.mlab import csv2rec\n\n#import nitime\nimport nitime.analysis as nta\nimport nitime.timeseries as ts\nimport nitime.utils as tsu\nfrom nitime.viz import drawmatrix_channels\n\n# Define some parameters\nDataPath = '/hsgs/projects/jhyoon1/midbrain_pilots/mid/analysis/EffectiveConnectivity/'\nresult_dir = '/hsgs/projects/jhyoon1/midbrain_pilots/mid/analysis/EffectiveConnectivity/'\nsubjects = ['MP020_050613','MP021_051713','MP022_051713','MP023_052013','MP024_052913',\n 'MP025_061013','MP026_062613','MP027_062713','MP028_062813','MP029_070213',\n 'MP030_070313','MP031_071813','MP032_071013','MP033_071213','MP034_072213',\n 'MP035_072613','MP036_072913','MP037_080613']\ntarget_data = '%s_timeseries_data.csv'\ncol_offset = 8# starting which column are the ROIs data?\nTR = 2 # TR\nf_ub = np.inf #upper bound of frequency of interest: low pass\nf_lb = 1/128 # lower bound of frequency of interest: high pass\ngranger_order = 1 #predict the current behavior of the time-series based on how many time points back?\nconditions = ['lose5','lose1','lose0','gain0','gain1','gain5']\n\n# Initialize stack objects to record data\nstack_coh = np.zeros(len(subjects),dtype = {'names': conditions,'formats': ['f4']*len(conditions) })\nstack_gl = np.zeros_like(stack_coh)\nstack_R = np.zeros_like(stack_coh)\n\ndef granger_causality_analysis(time_series, f_lb, f_ub, granger_order, roi_names,result_dir, s='', c=''):\n # initialize GrangerAnalyzer object\n G = nta.GrangerAnalyzer(time_series, order = granger_order)\n # initialize CoherenceAnalyzer \n C = nta.CoherenceAnalyzer(time_series)\n # initialize CorrelationAnalyzer\n R = nta.CorrelationAnalyzer(time_series)\n # get the index of the frequency band of interest for different analyzer\n freq_idx_G = np.where((G.frequencies > f_lb) * (G.frequencies < f_ub))[0]\n freq_idx_C = np.where((C.frequencies> f_lb) * (C.frequencies < f_ub)) [0]\n # average the last dimension\n coh = np.mean(C.coherence[:, :, freq_idx_C], -1) \n gl = np.mean(G.causality_xy[:, :, freq_idx_G], -1)\n # Difference in HRF between ROI may result misattriution of causality\n # examine diference between x-->y and y-->x\n g2 = np.mean(G.causality_xy[:,:,freq_idx_G] - G.causality_yx[:,:,freq_idx_G],-1)\n # Figure organization:\n # causality_xy: x-->y, or roi_names[0]-->roi_names[1], roi_names[0]-->roi_names[2], etc.\n # this makes: given a column, transverse through rows. Directionality is\n # from current column label to each row label\n # plot coherence from x to y, \n drawmatrix_channels(coh, roi_names, size=[10., 10.], color_anchor=0)\n plt.title(('%s %s pair-wise Coherence' % (s, c)).replace(' ',' '))\n plt.savefig(os.path.join(result_dir,s,('%s_%s_pairwise_coherence.png' % (s, c)).replace('__','_')))\n # plot correlation from x to y\n #drawmatrix_channels(R.corrcoef, roi_names, size=[10., 10.], color_anchor=0)\n #plt.title(('%s %s pair-wise Correlation' % (s, c)).replace(' ',' '))\n #plt.savefig(os.path.join(result_dir,s,('%s_%s_pairwise_correlation.png' % (s, c)).replace('__','_')))\n # plot granger causality from x to y\n drawmatrix_channels(gl, roi_names, size=[10., 10.], color_anchor=0)\n plt.title(('%s %s pair-wise Granger Causality' % (s, c)).replace(' ',' '))\n plt.savefig(os.path.join(result_dir,s,('%s_%s_pairwise_granger_causality.png' % (s, c)).replace('__','_')))\n # plot granger causliaty forward-backward difference\n drawmatrix_channels(g2, roi_names, size=[10., 10.], color_anchor = 0)\n plt.title(('%s %s pair-wise Granger Causality Forward-Backward Difference' % (s, c)).replace(' ',' '))\n plt.savefig(os.path.join(result_dir,s,('%s_%s_granger_causality_forward_backward_diff.png' % (s, c)).replace('__','_')))\n # close all the figures\n plt.close(\"all\")\n return(coh, gl, g2, G, C, R)\n\n\n# Pair-wise Granger Causality\nfor n, s in enumerate(subjects): # transverse through subjects\n # construct current data path\n current_data = os.path.join(DataPath,s,target_data % (s))\n # read csv file\n data_rec = csv2rec(current_data)\n roi_names = np.array(data_rec.dtype.names)[(0+col_offset):]\n n_seq = len(roi_names) # number of rois \n n_samples = data_rec.shape[0] # number of time points\n data = np.zeros((n_seq, n_samples)) # initialize output data\n # import the data to numpy\n for n_idx, roi in enumerate(roi_names):\n data[n_idx] = data_rec[roi]\n \n # normalize the data of each ROI to be in units of percent change\n pdata = tsu.percent_change(data)\n # get the index of rows/observations to include\n phase_idx = np.logical_or(data_rec['phases'] == 'Cue', data_rec['phases'] == 'Delay')\n pdata = np.delete(pdata,np.where(np.logical_not(phase_idx)),axis=1)\n cond_names = np.delete(data_rec['conditions'], np.where(np.logical_not(phase_idx)), axis=0)\n #pdata[:,np.where(np.logical_not(phase_idx))] = 0\n # initialize TimeSeries object\n #time_series = ts.TimeSeries(pdata,sampling_interval=TR)\n # Do granger causality analysis for each condition\n for m, c in enumerate(conditions):\n # get time points of current condition\n time_series = ts.TimeSeries(pdata[:,np.where(cond_names==c)], sampling_interval=TR)\n # do granger causality analysis\n coh, gl, g2, G, C, R = granger_causality_analysis(time_series, f_lb, f_ub, granger_order, roi_names, result_dir, s, c)\n # store current values into a structure array\n stack_coh[n] = stack_coh[n] + (coh,)\n stack_gl[n][m] = stack_gl[n] + (gl,)\n stack_R[n][m] = stack_R[n] + (R.corrcoef,)\n # clear current time series\n time_series_c = None\n \n # stack the value to sum_'s\n stack_coh = np.dstack((stack_coh, coh))\n stack_gl = np.dstack((stack_gl, gl))\n stack_R = np.dstack((stack_R, R.corrcoef))\n # save extracted data\n np.savez(os.path.join(result_dir,s,'%s_data.npz' % (s)),G,C,R,coh,gl,g2)\n # clear the variables\n coh, gl, g2, G, C, R = (None, None, None, None, None, None)\n \n\n# calcualte mean\nmean_coh = np.mean(stack_coh, axis = 2)\nmean_gl = np.mean(stack_gl, axis = 2)\nmean_R = np.mean(stack_R, axis = 2)\n# plot group averaged\nfigGroup_coh = drawmatrix_channels(mean_coh, roi_names,size=[10.,10.], color_anchor=0)\nplt.title('Control pair-wise Coherence')\nplt.savefig(os.path.join(result_dir,'Control_pair_wise_coherence.png'))\nfigGroup_gl = drawmatrix_channels(mean_gl, roi_names,size=[10.,10.], color_anchor=0)\nplt.title('Control pair-wise Granger Causality')\nplt.savefig(os.path.join(result_dir,'Control_pair_wise_granger_causality.png'))\nfigGroup_R = drawmatrix_channels(mean_R, roi_names,size=[10.,10.], color_anchor=0)\nplt.title('Control pair-wise Correlation')\nplt.savefig(os.path.join(result_dir,'Control_pair_wise_correlation.png'))\n\nnp.savez(os.path.join(result_dir,'Control_Group_average.npz'), stack_coh, \n stack_gl, stack_R, mean_coh, mean_gl, mean_R)\n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n\n\n\n\n\n\n\n\n\n\n\n\n\n"
},
{
"alpha_fraction": 0.7561521530151367,
"alphanum_fraction": 0.7718120813369751,
"avg_line_length": 53.875,
"blob_id": "21358ca8e385f311cbabce16c98f415f6b3279db",
"content_id": "37122af877a587718f3d877cd28e900e8b4a0e63",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Markdown",
"length_bytes": 447,
"license_type": "no_license",
"max_line_length": 101,
"num_lines": 8,
"path": "/Spikes/spike sorting plan.md",
"repo_name": "sapphire008/Python",
"src_encoding": "UTF-8",
"text": "## Spike sorting and spike classification pipeline\r\n\r\n1. Detection of spikes via wavelet decomposition.\r\n2. Separation of spikes from noise using two methods:\r\n * Generative model, such as Mixture of Gaussian\r\n * Superparamagnetic clustering --> advnatage: not assuming Gaussian. Maybe advantages in some cases\r\n3. Generate spike templates based on ICA/PCA wavelet components (CWT/DWT)\r\n4. Consider selection of wavelet basis (.e.g bior1.2/3?)\r\n"
},
{
"alpha_fraction": 0.37794119119644165,
"alphanum_fraction": 0.4193277359008789,
"avg_line_length": 34.89922332763672,
"blob_id": "ab26dc7b7fd1394c5bc7fe7082b4c8c188e3f57f",
"content_id": "ac3b3e3864879a620569a44291740c11afb97a87",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 4760,
"license_type": "no_license",
"max_line_length": 95,
"num_lines": 129,
"path": "/generic/geomeotry.py",
"repo_name": "sapphire008/Python",
"src_encoding": "UTF-8",
"text": "# -*- coding: utf-8 -*-\r\n\"\"\"\r\nCreated on Sun Aug 30 01:41:55 2015\r\n\r\nfunctions for geometry\r\n\r\n@author: Edward\r\n\"\"\"\r\n\r\nimport numpy as np\r\n\r\ndef spm_matrix(P, order='T*R*Z*S'):\r\n \"\"\"Python adaptaion of spm_matrix\r\n returns an affine transformation matrix\r\n FORMAT [A, T, R, Z, S] = spm_matrix(P, order)\r\n P(1) - x translation\r\n P(2) - y translation\r\n P(3) - z translation\r\n P(4) - x rotation about - {pitch} (radians)\r\n P(5) - y rotation about - {roll} (radians)\r\n P(6) - z rotation about - {yaw} (radians)\r\n P(7) - x scaling\r\n P(8) - y scaling\r\n P(9) - z scaling\r\n P(10) - x affine\r\n P(11) - y affine\r\n P(12) - z affine\r\n\r\n order (optional) application order of transformations.\r\n\r\n A - affine transformation matrix\r\n ___________________________________________________________________________\r\n\r\n spm_matrix returns a matrix defining an orthogonal linear (translation,\r\n rotation, scaling or affine) transformation given a vector of\r\n parameters (P). By default, the transformations are applied in the\r\n following order (i.e., the opposite to which they are specified):\r\n\r\n 1) shear\r\n 2) scale (zoom)\r\n 3) rotation - yaw, roll & pitch\r\n 4) translation\r\n\r\n This order can be changed by calling spm_matrix with a string as a\r\n second argument. This string may contain any valid MATLAB expression\r\n that returns a 4x4 matrix after evaluation. The special characters 'S',\r\n 'Z', 'R', 'T' can be used to reference the transformations 1)-4)\r\n above. The default order is 'T*R*Z*S', as described above.\r\n\r\n SPM uses a PRE-multiplication format i.e. Y = A*X where X and Y are 4 x n\r\n matrices of n coordinates.\r\n\r\n __________________________________________________________________________\r\n Copyright (C) 2008 Wellcome Trust Centre for Neuroimaging\r\n\r\n Karl Friston\r\n $Id: spm_matrix.m 1149 2008-02-14 14:29:04Z volkmar $\r\n \"\"\"\r\n # pad P with 'null' parameters\r\n #---------------------------------------------------------------------------\r\n q = np.array([0, 0, 0, 0, 0, 0, 1, 1, 1, 0, 0, 0])\r\n P = np.concatenate((P,q[(np.size(P) + 1):12]))\r\n\r\n # Transformation matrices\r\n T = np.array([[1,0,0,P[0]], # translation\r\n [0,1,0,P[1]],\r\n [0,0,1,P[2]],\r\n [0,0,0,1]])\r\n\r\n R1 = np.array([[1, 0, 0, 0],\r\n [0, np.cos(P[3]), np.sin(P[3]), 0],\r\n [0, -np.sin(P[3]), np.cos(P[3]), 0],\r\n [0, 0, 0, 1]])\r\n\r\n R2 = np.array([[np.cos(P[4]), 0, np.sin(P[4]), 0],\r\n [0, 1, 0, 0],\r\n [-np.sin(P[4]), 0, np.cos(P[4]), 0],\r\n [0, 0, 0, 1]])\r\n\r\n R3 = np.array([[np.cos(P[5]), np.sin(P[5]), 0, 0],\r\n [-np.sin(P[5]), np.cos(P[5]), 0, 0],\r\n [0, 0, 1, 0],\r\n [0, 0, 0, 1]])\r\n\r\n R = R1.dot(R2).dot(R3) # rotation\r\n\r\n Z = np.array([[P[6], 0, 0, 0], # scale\r\n [0, P[7], 0, 0],\r\n [0, 0, P[8], 0,],\r\n [0, 0, 0, 1]])\r\n\r\n S = np.array([[1, P[9], P[10], 0], # shear\r\n [0, 1, P[11], 0],\r\n [0, 0, 1, 0],\r\n [0, 0, 0, 1]])\r\n\r\n # order = '('+order.replace('*', ').dot(')+')' # convert to np dot product\r\n # A = eval(order)\r\n v = {'T':T, 'R':R, 'Z':Z, 'S':S}\r\n l = order.split('*')\r\n A = v[l[0]].dot(v[l[1]]).dot(v[l[2]]).dot(v[l[3]])\r\n\r\ndef ndim_rotation_matrix(x, y):\r\n \"\"\"\r\n Implemented based on MATLAB code from\r\n https://math.stackexchange.com/questions/598750/finding-the-rotation-matrix-in-n-dimensions\r\n\r\n x, y are n-dimensional column vectors\r\n\r\n u = x / |x|\r\n v = y - (u'*y).*u\r\n v = v / |v|\r\n\r\n cos(theta) = x' * y / (|x| |y|)\r\n sin(theta) = sqrt(1-cos(theta)^2)\r\n\r\n R = I - u*u' - v*v' + [u, v] R_theta [u, v]'\r\n \"\"\"\r\n u = x / np.linalg.norm(x, ord=2)\r\n v = y - u.T.dot(y)*(u)\r\n v = v / np.linalg.norm(v, ord=2)\r\n\r\n cost = float(x.T.dot(y) / (np.linalg.norm(x, ord=2) * np.linalg.norm(y, ord=2)))\r\n sint = float(np.sqrt(1-cost**2))\r\n\r\n R = np.eye(x.shape[0]) - u.dot(u.T) - v.dot(v.T) + \\\r\n np.c_[u, v].dot(np.array([[cost, -sint], [sint, cost]])).dot(np.c_[u, v].T)\r\n\r\n return R\r\n"
},
{
"alpha_fraction": 0.5125852823257446,
"alphanum_fraction": 0.5250529050827026,
"avg_line_length": 38.09434127807617,
"blob_id": "d650c0999c1f4359120685d61e8a4ed02f69fbd5",
"content_id": "e19d9b11746dd86a63d71561d82c22bf1edb3485",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 8502,
"license_type": "no_license",
"max_line_length": 120,
"num_lines": 212,
"path": "/Plots/archive/PublicationFigures_old.py",
"repo_name": "sapphire008/Python",
"src_encoding": "UTF-8",
"text": "# -*- coding: utf-8 -*-\r\n\"\"\"\r\nCreated on Sat Jun 06 13:35:08 2015\r\n\r\n@author: Edward\r\n\"\"\"\r\nimport re\r\nimport numpy as np\r\n#import matplotlib\r\n#matplotlib.use('Agg') # use 'Agg' backend\r\nimport matplotlib.pyplot as plt\r\nfrom beeswarm import *\r\n\r\ndataFile = 'C:/Users/Edward/Documents/Assignments/Scripts/Python/Plots/beeswarm.txt'\r\n\r\nclass FigureData(object):\r\n \"\"\"Parse input text file data for figures\r\n \"\"\"\r\n def __init__(self, dataFile=None):\r\n \"\"\"Initialize class\"\"\"\r\n self.series = dict()\r\n self.names = dict()\r\n self.num = []# count number of data sets\r\n if dataFile is not None and isinstance(dataFile, str):\r\n self.loadData(dataFile)\r\n\r\n def loadData(self, dataFile):\r\n \"\"\"Load data in text file\"\"\"\r\n with open(dataFile, 'rb') as fid:\r\n for line in fid: # iterate each line\r\n if line[0] == \"#\":\r\n continue # skip comments\r\n # split comma delimited string\r\n # series code, series name,@datatype, data1, data2, data3, ...\r\n lst = line.strip().split(',')\r\n # Parse data series name\r\n self.names[lst[1]] = lst[2][1:-1]\r\n # Parse data\r\n if lst[0][1:] == 'str':\r\n self.series[lst[1]] = np.array(lst[3:])\r\n elif lst[0][1:] == 'float':\r\n self.series[lst[1]] = np.array(lst[3:]).astype(np.float)\r\n elif lst[0][1:] == 'int':\r\n self.series[lst[1]] = np.array(lst[3:]).astype(np.int)\r\n else: # unrecognized type\r\n BaseException('Unrecognized data type')\r\n fid.close()\r\n # Parse number of data set\r\n for x in self.series.keys():\r\n self.num.extend(map(int, re.findall(r'\\d+',x)))\r\n self.num = max(self.num)\r\n\r\n\r\nclass PublicationFigures(FigureData):\r\n \"\"\"Generate publicatino quantlity figures\r\n Data: FigureData, or data file path\r\n PlotType: currently supported plot types include:\r\n ~ LinePlot: for categorical data, with error bar\r\n Style: \r\n 'Twin' -- Same plot, 2 y-axis (left and right of plot)\r\n 'Vstacked' (default) -- vertically stacked subplots\r\n ~ Beeswarm: beeswarm plot; boxplot with scatter points\r\n Style: 'hex','swarm' (default),'center','square'\r\n '\r\n \"\"\"\r\n def __init__(self, Data=None, PlotType=None, Style=None, SavePath=None):\r\n \"\"\"Initialize class \r\n \"\"\"\r\n if Data is None:\r\n return\r\n elif isinstance(Data, str):\r\n self.data = FigureData(Data) # load data\r\n elif isinstance(Data, FigureData):\r\n self.data = Data\r\n self.PlotType = PlotType\r\n self.Style = Style\r\n self.SavePath = SavePath\r\n # Do Plots\r\n self.DoPlots()\r\n \r\n def DoPlots(self):\r\n # Switch between plots\r\n if self.PlotType is None:\r\n return\r\n if self.PlotType == 'LinePlot':\r\n if self.Style is None:\r\n self.Style = 'Vstack'\r\n if self.Style == 'Twin':\r\n self.LinePlotTwin()\r\n elif self.Style == 'Vstack':\r\n self.LinePlotVstack()\r\n # Fix style\r\n self.SetLinePlotStyle()\r\n elif self.PlotType == 'Beeswarm':\r\n if self.Style is None:\r\n self.Style = 'swarm'\r\n self.Beeswarm()\r\n else: # unrecognized plot type\r\n BaseException('Unsupported Plot Type')\r\n #self.fig.show()\r\n if self.SavePath is not None:\r\n self.fig.savefig(self.SavePath)\r\n \r\n def Beeswarm(self):\r\n colors = ['red','cyan','green','magenta','blue','black']\r\n # boardcasting color cycle\r\n colors = self.data.num/len(colors)*colors+colors[0:self.data.num%len(colors)]\r\n # Data\r\n datavect = []\r\n [datavect.append(self.data.series['y'+str(n+1)]) for n in range(self.data.num)]\r\n print(datavect)\r\n # names\r\n datanames = []\r\n [datanames.append(self.data.names['y'+str(n+1)]) for n in range(self.data.num)]\r\n self.bs, ax = beeswarm(datavect, method=self.Style, \r\n labels=datanames, \r\n col=colors)\r\n # Format style\r\n # make sure axis tickmark points out\r\n ax.tick_params(axis='both',direction='out')\r\n ax.spines['right'].set_visible(False)\r\n ax.spines['top'].set_visible(False)\r\n ax.xaxis.set_ticks_position('bottom')\r\n ax.yaxis.set_ticks_position('left')\r\n # save current figure handle\r\n self.fig = plt.gcf()\r\n self.axs = ax\r\n # Do annotation: compare significance\r\n X = ax.get_xticks()\r\n Y = [max(x) for x in datavect]\r\n self.label_diff(0,1,'p=0.0370',X,Y)\r\n self.label_diff(1,2,'p<0.0001',X,Y)\r\n\r\n def label_diff(self, i,j,text,X,Y):\r\n # Custom function to draw the diff bars\r\n x = (X[i]+X[j])/2\r\n y = 1.1*max(Y[i], Y[j])\r\n dx = abs(X[i]-X[j])\r\n\r\n props = {'connectionstyle':'bar','arrowstyle':'-',\\\r\n 'shrinkA':20,'shrinkB':20,'lw':2}\r\n self.axs.annotate(text, xy=(X[i],y+7), zorder=10)\r\n self.axs.annotate('', xy=(X[i],y), xytext=(X[j],y), arrowprops=props)\r\n\r\n\r\n \r\n def LinePlotTwin(self):\r\n self.x = range(1,len(self.data.series['x1'])+1)\r\n self.fig, self.axs = plt.subplots(nrows=1,ncols=1, sharex=True)\r\n self.axs = [self.axs, self.axs.twinx()]\r\n colors = ('k','r')\r\n spineName = ('left','right')\r\n for n, ax in enumerate(self.axs):\r\n # Plot error bar\r\n ax.errorbar(self.x,self.data.series['y'+str(n+1)],\r\n yerr = [self.data.series['yebp'+str(n+1)],\r\n self.data.series['yebn'+str(n+1)]], color=colors[n])\r\n # For twin Plot\r\n ax.xaxis.set_ticks_position('bottom')\r\n ax.yaxis.label.set_color(colors[n])\r\n ax.tick_params(axis='y',colors=colors[n])\r\n ax.spines[spineName[n]].set_color(colors[n])\r\n self.axs[0].set_xlabel(self.data.names['x1'])\r\n\r\n def LinePlotVstack(self):\r\n self.x = range(1,len(self.data.series['x1'])+1)\r\n self.fig, self.axs = plt.subplots(nrows=self.data.num, ncols=1,sharex=True)\r\n for n, ax in enumerate(self.axs):\r\n # Plot error bar\r\n ax.errorbar(self.x,self.data.series['y'+str(n+1)],\r\n yerr = [self.data.series['yebp'+str(n+1)],\r\n self.data.series['yebn'+str(n+1)]], color='k')\r\n # For Vstack\r\n ax.yaxis.set_ticks_position('left')\r\n ax.spines['right'].set_visible(False)\r\n if n < (self.data.num-1): # first several plots\r\n ax.xaxis.set_ticks_position('none')\r\n ax.spines['bottom'].set_visible(False)\r\n else: # last plot\r\n ax.xaxis.set_ticks_position('bottom')\r\n self.axs[-1].set_xlabel(self.data.names['x1'])\r\n\r\n def SetLinePlotStyle(self):\r\n for n, ax in enumerate(self.axs):\r\n # Set ylabel\r\n ax.set_ylabel(self.data.names['y'+str(n+1)])\r\n # make sure axis tickmark points out\r\n ax.tick_params(axis='both',direction='out')\r\n # Set axis visibility\r\n ax.spines['top'].set_visible(False)\r\n # change the x lim on the last, most buttom subplot\r\n ax.set_xlim([0,len(self.data.series['x1'])+1])\r\n plt.xticks(self.x, self.data.series['x1'])\r\n # Add some margins to the plot so that it is not touching the axes\r\n plt.margins(0.02,0.02)\r\n self.fig.tight_layout() # enforce tight layout\r\n# \r\n# def TimeSeries(self):\r\n# \"\"\"Plot traces\r\n# Style:\r\n# 'Classic': \r\n# 'Vstack': plots stacked vertically\r\n# \"\"\"\r\n \r\nif __name__ == \"__main__\":\r\n #K = PublicationFigures(Data=dataFile,PlotType='LinePlot',Style='Twin',SavePath='C:/QQDownload/asdf_twin.eps')\r\n # Allows further tuning\r\n # to set the ylim\r\n #K.axs[0].set_ylim([0,2.0])\r\n #K.axs[1].set_ylim([0.05, 0.25])\r\n K = PublicationFigures(Data=dataFile,PlotType='Beeswarm', Style='swarm', SavePath='C:/QQDownload/asdf_beeswarm.png')\r\n K.axs.set_ylim([-3,7])\r\n\r\n"
},
{
"alpha_fraction": 0.6480855941772461,
"alphanum_fraction": 0.6745495200157166,
"avg_line_length": 31.771217346191406,
"blob_id": "955e95440fa5a132f0038879ba90f6fa7bb29e5c",
"content_id": "ca8f4807f8b57c7701ba1201a1459b50a4a59d56",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 8880,
"license_type": "no_license",
"max_line_length": 125,
"num_lines": 271,
"path": "/python_tutorials/practice_notes_3.py",
"repo_name": "sapphire008/Python",
"src_encoding": "UTF-8",
"text": "# Python 3.3.0 Practice Notes\n# Day 3: November 25, 2012\n\n# Strings\n#Note for defining a string\n#I personally perfer using double quotes, since single quotes are not good\n#when the string contains possessive- or contraction-like words, for instance\n#>>>ABCD='I'd like this to go';\n#>>> File \"<console>\", line 1\n#>>> ABCD='I'd like this to go';\n#>>> ^\n#>>>SyntaxError: invalid syntax\n#However, it is legal to do\n#>>>ABCD=\"I'd like this to go\";\n#Nonetheless, if the string do contain double quotation mark, it looks like\n#we still have to switch to single quotation mark.\n#The question would be what if the string contains both double and single quotation mark?\n#That is, how to define a string with such sentence: I'd like to ask \"him\" about it.\n#One way, perhaps is to do concatenation\na_word=\"asdfghjkl;\";\nfor i in range(0,len(a_word),1):\n print(i,\":\",a_word[i]);#this should print out each letter of the string stored in a_word, forward\n\n#This can be done in another way\nfor i in a_word:\n print(i);#this should print out each letter of the stringg stored in a_word, forward\n\nfor j in range(1,len(a_word)+1,1):\n print(-j,\":\",a_word[-j]);#this should print out each letter of the string stored in a_word, backwards\n\n#String Indexing\na_word[0:3];#or equivalently\na_word[:3];\n#both lines should print out the string up to its index 3-1 (total 3 letters)\n#>>>'asd'\na_word[3:len(a_word)];#or equivalently\na_word[3:];\n#both lines should print out the string from its index 3 to the end (total len(a_word)-3 letters)\n#>>>'fghjkl;'\na_word[:];#this should print out the whole string, equivalent to print a_word directly\n#Important Note: unlike MATLAB, string in Python are not treated as a matrix/vector\n#Strings in Python is immutable, meaning its elements cannot be changed\n#Therefore, it will be an error to write\n#a_word[3]=\"K\";\n#>>>TypeError: 'str' object does not support item assignment\n\n#String Method\nb_word = \"banana\";\nnew_b_word = b_word.upper();#this \"S.upper\" method converts all lowercase letter to uppercase letter\nprint(new_b_word);\n#>>>'BANANA'\nA_index = b_word.find('an',3,10);\nprint(A_index);\n#this \"S.find(substring,start,end)\" method should find the lowest index of specified substring\n#notice that even if the end index exceeds the length of the string,\n#unlike MATLAB, there will be no message indicating that index exceeds dimension\n#if there is no such substring within the searched string, returns -1\nthe_word=\"BANANANa\";\nthe_word.isupper();#returns true if the string ONLY contains uppercase letter\n#>>>False\n\n#Another note on the notation of help documentation/calltips of Python functions and methods\n#for instance, S.find(sub[, start[, end]])\n#The notation indicates that \"sub\" (since it is outside the bracket) is required,\n#whereas \"start\" is optional (since it is inside a bracket).\n#However, once start is specified, \"end\" is now optional\n#In another words, \"end\" cannot be specified without \"start\"\n\n#The \"in\" Operator\n#This operator checkes if the string specified before the \"in\" operator\n#is the substring of the string specified after the \"in\" operator\n\"a\" in \"banana\";\n#>>>True\n\"seed\" in \"banana\";\n#>>>False\n\n#String Comparison\n\"A\"<\"B\" and \"B\"<\"C\"\n#>>>True\n#Both statements are true, which makes the entire line true\n#Strings are compared directly as numbers\n#the number that each character corresponds to ASCII\n\"a\">\"A\"\n#>>>True\n\"b\"<\"B\"\n#>>>False\n# Just like Java, when comparing strings with multiple letters,\n# Python compares the first letter of each word, if they are the same,\n# Python goes to the second letter, and then compare them.\n# A list of words can be organized in such a way\n######################################################################################################################\n\n# Lists\nList_A=[\"asdf\",\"jkl;\",\"Such a good weather\"];#list of strings\nList_B=[1,3,4,12,234];#list of integers\nList_C=[];#empty list\nList_Mixed=[1,2.3424,\"sanskrit\",23,\"floating above\", 3.242,\"12.23\"];#mixed different types\n#Lists are mutable\nList_Mixed[2]='not a sanskrit';\n#>>>[1, 2.3424, 'not a sanskrit', 23, 'floating above', 3.242, '12.23']\n\n#\"in\" operator for lists\n\"sanskrit\" in List_Mixed\n#>>>False\n\"not a sanskrit\" in List_Mixed\n#>>>True\n\n#Nested Lists\nList_nested=[\"good\",1.234,[\"bad\",3.1234,32],[2,3,4,5,6]];\nList_nested[2];#return the second element of the list List_nested\n#>>>['bad', 3.1234, 32]\nList_nested[2][1];#call the index 2 element of the list List_nested, then from the returned element, call its index 1 element\n#>>>3.1234\n\n#List Operatoions\na=[1,2,3];\nb=[4,5,6];\nc=a+b;#concatenating a and b\nprint(c);\n#>>>[1,2,3,4,5,6]\nd=a*4;#repeat a 4 times in the new list\nprint(d);\n#>>>[1,2,3,1,2,3,1,2,3]\n\n#List indexing--very similar to string indexing\nf=c[:3];\nprint(f);\n#>>>[1, 2, 3]\nt=c[3:];\nprint(t);\n#>>>[4, 5, 6]\ns=c[:];\nprint(s);\n#>>>[1, 2, 3, 4, 5, 6]\n\n#List Methods\nt=['a','b','c'];\nt.append('d');#appending another element to the end of the list, void method\nprint(t);\n#>>>['a', 'b', 'c', 'd']\n#compare to\nt.append(['e','f','g']);\nprint(t);\n#>>>['a', 'b', 'c', 'd', ['e', 'f', 'g']]\n#To append each element of another list to a list, use extend\nt1=['a','b','c'];\nt2=['d','e','f','g'];\nt1.extend(t2);#appending each element of t2 to t1, void method\nprint(t1);\n#>>>['a', 'b', 'c', 'd', 'e', 'f', 'g']\nt=['adf','gdasdf','deas','adsff','ggas'];\nt.sort();#void method\nprint(t);#sort the list\n#>>>['adf', 'adsff', 'deas', 'gdasdf', 'ggas']\n\n#Map, filter, and reduce\n#one way to sum up all the elements in the list\ndef add_all(t):\n total=0;\n for x in t:\n total+=x;#same as JAVA, equivalent to total = total+x;\n return total\n\nt=[1,2,3,4,5];\nsum_all=add_all(t);\nprint(sum_all);\n\n#A simpler way to add all elements is using sum()\nsum(t);\n#Reduce: an operation that combines all the element in a list into a single value\n#accumulator: a variable that accumulates the result of each iteration when transversing through a list\n#map: an operation that \"maps\" a function to each element in a sequence\n\n#Deleting elements\n#If we know the index of the element\nt=['a','b','c','d','e'];\nx=t.pop(1);#returns the element being deleted, and modify t after deleting\n#List.pop([index]), default_index is the index of the last element\nprint(t);\n#>>>['a', 'c', 'd', 'e']\nprint(x);\n#>>>b\n#using del() operator gives the same effect\nt=['a','b','c','d','e'];\ndel(t[1:3]);#delete up to but not including index 3 elements, so, only index 1 and 2 are deleted\nprint(t);\n#On the other hand, if we know the element itself but not the index of it\nt=['a','b','c','d','e'];\nt.remove('b');#void method\nprint(t);\n\n#converting between lists and strings\ns=\"spam\";\nt=list(s);#convert each letter of s into a list of letterz\nprint(t);\n#>>>['s', 'p', 'a', 'm']\ns=\"pining for the fjords\";\nt=s.split();#S.split([sep [,maxsplit]]), default_sep = \" \" space, can set maximum number of split\nprint(t);\n#>>>['pining', 'for', 'the', 'fjords']\nt=['pining', 'for', 'the', 'fjords'];\ndelimiter=\" \";\ns=delimiter.join(t);#join the list of words with delimiter\nprint(s);\n\n# Objects and values\na=\"banana\";\nb=\"banana\";\na is b;#checks if two objects are identical\n#>>>True\n#This means a and b are the same objects, and of course, with the same value\n#However,\na=[1,2,3];\nb=[1,2,3];\na is b;#checks if two objects are identical\n#>>>False\n#This means that even though a and b have the same value, they are different objects\n#Instead, list a and list b are called \"equivalent\", whereas string a and string b are called \"identical\"\n\n#In comparison\na=[1,2,3];\nb=a;\na is b;\n#>>>True\n#We call a is being aliased by b\n#if the aliased object is mutable, then change of the aliased object affects all of its alias\na[2]=100;\nprint(a);\n#>>>[1, 2, 100]\nprint(b);\n#>>>[1, 2, 100]\n#Notice that b is also changed even though we did not modify it\n#This is very different from MATLAB!!!\n#Thi must be noted carefully when coding, since it is so error prone\n#The question is how to work around this. Look at the following example for some hints\n\ndef give_tail(t):\n return t[1:];#this returns a NEW list, with the original t unmodified\n\nt=[1,2,3,4,5,6];\ns=give_tail(t);\nprint(t);\n#>>>[1, 2, 3, 4, 5, 6]\nprint(s);\n#>>>[2, 3, 4, 5, 6]\n\n#The wrong way to define the function\ndef bad_give_tail(t):\n t=t[1:];#trying to reassign t, but t does not change.\n\nt=[1,2,3,4,5,6];\ns=bad_give_tail(t);\nprint(t);\n#>>>[1, 2, 3, 4, 5, 6]\nprint(s);\n#>>>None\n#In another word, without the \"return\" statement, the function is a void function\n#On the contraray, MATLAB does a better job at this, without the need to worry about aliasing.\n\n#To create copies of the original list, use this:\nt=[1,2,3,4,5,6];\noriginal_list=t[:];#create a copy of the original list, without aliasing\nt is original_list;#test to see if they are the same list\n#>>>False\nt.append(7);\nprint(t);\n#>>>[1, 2, 3, 4, 5, 6, 7]\nprint(original_list);\n#>>>[1, 2, 3, 4, 5, 6]\n\n#This concludes today's study."
},
{
"alpha_fraction": 0.5577943325042725,
"alphanum_fraction": 0.5725409984588623,
"avg_line_length": 40.44008255004883,
"blob_id": "cf62524016656715ea4088b54ad4ed6f216ba1f8",
"content_id": "31dfa5c7b02f058c2e7f09348195a5e0a5490ed8",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 20547,
"license_type": "no_license",
"max_line_length": 182,
"num_lines": 484,
"path": "/Plots/plots.py",
"repo_name": "sapphire008/Python",
"src_encoding": "UTF-8",
"text": "# -*- coding: utf-8 -*-\r\n\"\"\"\r\nCreated on Fri Mar 18 02:21:18 2016\r\n\r\nGeneral utilities for plotting\r\n\r\n@author: Edward\r\n\"\"\"\r\nimport sys\r\nimport os\r\nimport numpy as np\r\n\r\n\r\nimport os\r\nimport signal\r\nimport subprocess\r\nimport time\r\nfrom pdb import set_trace\r\nfrom scipy import stats\r\n\r\nimport matplotlib as mpl\r\n# mpl.use('PS')\r\n#mpl.rcParams['pdf.fonttype'] = 42\r\n#mpl.rcParams['ps.fonttype'] = 42\r\nmpl.rcParams['svg.fonttype'] = 'none'\r\n#mpl.rcParams['ps.useafm'] = True\r\n#mpl.rcParams['pdf.use14corefonts'] = True\r\n#mpl.rcParams['text.usetex'] = True\r\n# mpl.rcParams['text.usetex'] = True\r\nimport matplotlib.pyplot as plt\r\nimport matplotlib.font_manager as fm\r\n\r\n\r\n# some predetermined parameters\r\nfontsize = {'title':10, 'xlab':8, 'ylab':8, 'xtick':5,'ytick':5,'texts':5,\r\n 'legend': 5, 'legendtitle':6, 'xminortick':5, 'yminortick':5, \"colorbartick\": 5} # font size\r\n\r\n# unit in points. This corresponds to 0.25mm (1pt = 1/72 inch)\r\nbar_line_property = {'border': 0.70866144, 'h_err_bar': 0.70866144, 'v_err_bar': 0.70866144, \r\n 'xaxis_tick': 0.70866144, 'yaxis_tick': 0.70866144, 'xaxis_spine': 0.70866144,\r\n 'yaxis_spine': 0.70866144} # in mm.\r\n\r\n\r\ndef SetFont(ax, fig, fontsize=12,fontname='Arial',items=None):\r\n \"\"\"Change font properties of all axes\r\n ax: which axis or axes to change the font. Default all axis in current\r\n instance. To skip axis, input as [].\r\n fig: figure handle to change the font (text in figure, not in axis).\r\n Default is any text items in current instance. To skip, input as [].\r\n fontsize: size of the font, specified in the global variable\r\n fontname: fullpath of the font, specified in the global variable\r\n items: select a list of items to change font. ['title', 'xlab','ylab',\r\n 'xtick','ytick', 'texts','legend','legendtitle']\r\n \r\n \"\"\"\r\n \r\n def get_ax_items(ax):\r\n \"\"\"Parse axis items\"\"\"\r\n itemDict={'title':[ax.title], 'xlab':[ax.xaxis.label],\r\n 'ylab':[ax.yaxis.label], 'xtick':ax.get_xticklabels(),\r\n 'ytick':ax.get_yticklabels(),\r\n 'xminortick': ax.get_xminorticklabels(),\r\n 'yminortick': ax.get_yminorticklabels(),\r\n 'texts':ax.texts if isinstance(ax.texts,(np.ndarray,list))\r\n else [ax.texts],\r\n 'legend': [] if ax.legend_ is None\r\n else ax.legend_.get_texts(),\r\n 'legendtitle':[] if ax.legend_ is None\r\n else [ax.legend_.get_title()]\r\n }\r\n itemList, keyList = [], []\r\n if items is None: # get all items\r\n for k, v in iter(itemDict.items()):\r\n itemList += v\r\n keyList += [k]*len(v)\r\n else: # get only specified item\r\n for k in items:\r\n itemList += itemDict[k] # add only specified in items\r\n keyList += [k]*len(itemDict[k])\r\n \r\n return(itemList, keyList)\r\n \r\n def get_fig_items(fig):\r\n \"\"\"Parse figure text items\"\"\"\r\n itemList = fig.texts if isinstance(fig.texts,(np.ndarray,list)) \\\r\n else [fig.texts]\r\n keyList = ['texts'] * len(itemList)\r\n \r\n return(itemList, keyList)\r\n \r\n def CF(itemList, keyList):\r\n \"\"\"Change font given item\"\"\"\r\n # initialize fontprop object\r\n fontprop = fm.FontProperties(style='normal', weight='normal',\r\n stretch = 'normal')\r\n if os.path.isfile(fontname): # check if font is a file\r\n fontprop.set_file(fontname)\r\n else:# check if the name of font is available in the system\r\n if not any([fontname.lower() in a.lower() for a in\r\n fm.findSystemFonts(fontpaths=None, fontext='ttf')]):\r\n print('Cannot find specified font: %s' %(fontname))\r\n fontprop.set_family(fontname) # set font name\r\n # set font for each object\r\n for n, item in enumerate(itemList):\r\n if isinstance(fontsize, dict):\r\n if keyList[n] in fontsize.keys():\r\n fontprop.set_size(fontsize[keyList[n]])\r\n else:\r\n pass\r\n # print('Warning font property {} not in specified fontsize. Font is kept at defualt.'.format(keyList[n]))\r\n elif n <1: # set the properties only once\r\n fontprop.set_size(fontsize)\r\n item.set_fontproperties(fontprop) # change font for all items\r\n \r\n def CF_ax(ax): # combine CF and get_ax_items\r\n if not ax: # true when empty or None\r\n return # skip axis font change\r\n itemList, keyList = get_ax_items(ax)\r\n CF(itemList, keyList)\r\n \r\n def CF_fig(fig): # combine CF and get_fig_items\r\n if not fig: # true when empty or None\r\n return # skip figure font change\r\n itemsList, keyList = get_fig_items(fig)\r\n CF(itemsList, keyList)\r\n \r\n # vecotirze the closure\r\n CF_ax_vec = np.frompyfunc(CF_ax, 1,1)\r\n CF_fig_vec = np.frompyfunc(CF_fig, 1,1)\r\n \r\n # Do the actual font change\r\n CF_ax_vec(ax)\r\n CF_fig_vec(fig)\r\n\r\n \r\ndef AdjustAxs(otypes=[np.ndarray], excluded=None):\r\n \"\"\"Used as a decorator to set the axis properties\"\"\"\r\n def wrap(func):\r\n # vectorize the func so that it can be applied to single axis or\r\n # multiple axes\r\n func_vec = np.vectorize(func, otypes=otypes, excluded=excluded)\r\n def wrapper(ax, *args, **kwargs):\r\n res = func_vec(ax, *args, **kwargs)\r\n return(res)\r\n return(wrapper)\r\n return(wrap)\r\n \r\ndef SetAxisOrigin(ax, xcenter='origin', ycenter='origin', xspine='bottom', yspine='left'):\r\n \"\"\"Set the origin of the axis\"\"\"\r\n if xcenter == 'origin':\r\n xtick = ax.get_xticks()\r\n if max(xtick)<0:\r\n xcenter = max(xtick)\r\n elif min(xtick)>0:\r\n xcenter = min(xtick)\r\n else:\r\n xcenter = 0\r\n \r\n if ycenter == 'origin':\r\n ytick = ax.get_yticks()\r\n if max(ytick)<0:\r\n ycenter = max(ytick)\r\n elif min(ytick)>0:\r\n ycenter = min(ycenter)\r\n else:\r\n ycenter = 0\r\n \r\n xoffspine = 'top' if xspine == 'bottom' else 'bottom' \r\n yoffspine = 'right' if yspine=='left' else 'left'\r\n \r\n \r\n ax.spines[xspine].set_position(('data', ycenter))\r\n ax.spines[yspine].set_position(('data', xcenter))\r\n ax.spines[xoffspine].set_visible(False)\r\n ax.spines[yoffspine].set_visible(False)\r\n ax.xaxis.set_ticks_position(xspine)\r\n ax.yaxis.set_ticks_position(yspine)\r\n ax.spines[xspine].set_capstyle('butt')\r\n ax.spines[yspine].set_capstyle('butt')\r\n \r\ndef xysize_pt2data(ax, s=None, dpi=None, scale=(1,1)):\r\n \"\"\" Determine dot size in data axis.\r\n scale: helps further increasing space between dots\r\n s: font size in points\r\n \"\"\"\r\n figw, figh = ax.get_figure().get_size_inches() # figure width, height in inch\r\n dpi = float(ax.get_figure().get_dpi()) if dpi is None else float(dpi)\r\n w = (ax.get_position().xmax-ax.get_position().xmin)*figw # axis width in inch\r\n h = (ax.get_position().ymax-ax.get_position().ymin)*figh # axis height in inch\r\n xran = ax.get_xlim()[1]-ax.get_xlim()[0] # axis width in data\r\n yran = ax.get_ylim()[1]-ax.get_ylim()[0] # axis height in data\r\n \r\n xsize=np.sqrt(s)/dpi*xran/w*scale[0] # xscale * proportion of xwidth in data\r\n ysize=np.sqrt(s)/dpi*yran/h*scale[1] # yscale * proportion of yheight in data\r\n\r\n return xsize, ysize\r\n \r\ndef mm2pt(mm):\r\n return mm/10.0/2.54*72 # mm->cm-->inch-->pt\r\n \r\ndef xysize_data2pt(ax, p=None, dpi=None, scale=(1,1)):\r\n \"\"\"Convert from data size to font size in points\r\n p: data size \r\n \"\"\"\r\n figw, figh = ax.get_figure().get_size_inches() # figure width, height in inch\r\n dpi = float(ax.get_figure().get_dpi()) if dpi is None else float(dpi)\r\n w = (ax.get_position().xmax-ax.get_position().xmin)*figw # axis width in inch\r\n h = (ax.get_position().ymax-ax.get_position().ymin)*figh # axis height in inch\r\n xran = ax.get_xlim()[1]-ax.get_xlim()[0] # axis width in data\r\n yran = ax.get_ylim()[1]-ax.get_ylim()[0] # axis height in data\r\n \r\n xsize = (p*w*dpi/xran*scale[0])**2\r\n ysize = (p*h*dpi/yran*scale[1])**2\r\n \r\n return xsize, ysize\r\n \r\ndef AdjustCategoricalXAxis(ax, pad=(0.5,0.5), categorytickon=False):\r\n \"\"\"Additional settings for plots with categorical data\"\"\"\r\n # change the x lim on the last, most buttom subplot\r\n ax.set_xlim(ax.get_xticks()[0]-pad[0],ax.get_xlim()[-1])\r\n ax.set_xlim(ax.get_xlim()[0], ax.get_xticks()[-1]+pad[1])\r\n if not categorytickon:\r\n ax.tick_params(axis='x', bottom='off')\r\n \r\ndef AdjustCategoricalYAxis(ax, pad=(0.5,0.5), categorytickon=False):\r\n \"\"\"Additional settings for plots with categorical data\"\"\"\r\n ax.set_ylim(ax.get_yticks()[0]-pad[0],ax.get_ylim()[-1])\r\n ax.set_ylim(ax.get_ylim()[0], ax.get_yticks()[-1]+pad[1])\r\n if not categorytickon:\r\n ax.tick_params(axis='y', left='off')\r\n\r\n@AdjustAxs()\r\ndef SetDefaultAxis(ax):\r\n \"\"\"Set default axis appearance\"\"\"\r\n ax.tick_params(axis='both',direction='out')\r\n ax.spines['left'].set_visible(True)\r\n ax.spines['left'].set_capstyle('butt')\r\n ax.spines['right'].set_visible(False)\r\n ax.spines['top'].set_visible(False)\r\n ax.spines['bottom'].set_visible(True)\r\n ax.spines['bottom'].set_capstyle('butt')\r\n ax.xaxis.set_ticks_position('bottom')\r\n ax.yaxis.set_ticks_position('left')\r\n\r\n@AdjustAxs()\r\ndef SetDefaultAxis3D(ax, elev=45, azim=60, dist=12):\r\n ax.tick_params(axis='both', direction='out')\r\n ax.view_init(elev=elev, azim=azim) # set perspective\r\n ax.dist = dist # use default axis distance 10\r\n if ax.azim > 0: # z axis will be on the left\r\n ax.zaxis.set_rotate_label(False) # prevent auto rotation\r\n a = ax.zaxis.label.get_rotation()\r\n ax.zaxis.label.set_rotation(90+a) # set custom rotation\r\n ax.invert_xaxis() # make sure (0,0) in front\r\n ax.invert_yaxis() # make sure (0,0) in front\r\n else:\r\n ax.invert_xaxis() # make sure (0,0) in front\r\n #ax.zaxis.label.set_color('red')\r\n #ax.yaxis._axinfo['label']['space_factor'] = 2.8\r\n\r\n@AdjustAxs()\r\ndef equalAxLineWidth(ax, lineproperty ={'xaxis_tick': 0.70866144, \r\n 'yaxis_tick': 0.70866144, \r\n 'xaxis_spine': 0.70866144, \r\n 'yaxis_spine': 0.70866144}):\r\n ax.spines['left'].set_linewidth(bar_line_property['xaxis_spine'])\r\n ax.spines['right'].set_linewidth(bar_line_property['xaxis_spine'])\r\n ax.spines['bottom'].set_linewidth(bar_line_property['yaxis_spine'])\r\n ax.spines['top'].set_linewidth(bar_line_property['yaxis_spine'])\r\n ax.xaxis.set_tick_params(width=bar_line_property['xaxis_tick'])\r\n ax.yaxis.set_tick_params(width=bar_line_property['yaxis_tick'])\r\n \r\n@AdjustAxs()\r\ndef setAxisLineStyle(ax, lineproperty={'xaxis_tick_capstyle':'projecting',\r\n 'xaxis_tick_joinstyle':'miter',\r\n 'yaxis_tick_capstyle':'projecting',\r\n 'yaxis_tick_joinstyle':'miter',\r\n 'xaxis_spine_capstyle':'projecting',\r\n 'xaxis_spine_joinstyle':'miter',\r\n 'yaxis_spine_capstyle':'projecting',\r\n 'yaxis_spine_joinstyle':'miter',\r\n }):\r\n # Ticks\r\n for i in ax.xaxis.get_ticklines(): \r\n i._marker._capstyle = lineproperty['xaxis_tick_capstyle']\r\n i._marker._joinstyle = lineproperty['xaxis_tick_joinstyle']\r\n \r\n for i in ax.yaxis.get_ticklines():\r\n i._marker._capstyle = lineproperty['yaxis_tick_capstyle']\r\n i._marker._joinstyle = lineproperty['yaxis_tick_joinstyle']\r\n \r\n # Spines\r\n ax.spines['left']._capstyle = lineproperty['yaxis_spine_capstyle']\r\n ax.spines['left']._joinstyle = lineproperty['yaxis_spine_joinstyle']\r\n ax.spines['right']._capstyle = lineproperty['yaxis_spine_capstyle']\r\n ax.spines['right']._joinstyle = lineproperty['yaxis_spine_joinstyle']\r\n ax.spines['top']._capstyle = lineproperty['xaxis_spine_capstyle']\r\n ax.spines['top']._joinstyle = lineproperty['xaxis_spine_joinstyle']\r\n ax.spines['bottom']._capstyle = lineproperty['xaxis_spine_capstyle']\r\n ax.spines['bottom']._joinstyle = lineproperty['xaxis_spine_joinstyle']\r\n\r\n@AdjustAxs()\r\ndef defaultAxisStyle(ax):\r\n equalAxLineWidth(ax)\r\n setAxisLineStyle(ax)\r\n\r\n \r\ndef add_subplot_axes(ax,rect,axisbg='w'):\r\n \"\"\"Adding subplot within a plot\"\"\"\r\n fig = plt.gcf()\r\n box = ax.get_position()\r\n width = box.width\r\n height = box.height\r\n inax_position = ax.transAxes.transform(rect[0:2])\r\n transFigure = fig.transFigure.inverted()\r\n infig_position = transFigure.transform(inax_position) \r\n x = infig_position[0]\r\n y = infig_position[1]\r\n width *= rect[2]\r\n height *= rect[3] # <= Typo was here\r\n subax = fig.add_axes([x,y,width,height],axisbg=axisbg)\r\n x_labelsize = subax.get_xticklabels()[0].get_size()\r\n y_labelsize = subax.get_yticklabels()[0].get_size()\r\n x_labelsize *= rect[2]**0.5\r\n y_labelsize *= rect[3]**0.5\r\n subax.xaxis.set_tick_params(labelsize=x_labelsize)\r\n subax.yaxis.set_tick_params(labelsize=y_labelsize)\r\n return subax\r\n\r\n \r\n\r\njsx_file_str_AI_CS6 = \"\"\"\r\nfunction exportFigures_AI_CS6(sourceFile, targetFile, exportType, ExportOpts) {\r\n if (sourceFile){ // if not an empty string\r\n var fileRef = new File(sourceFile)\r\n var sourceDoc = app.open(fileRef); // returns the document object\r\n } else { // for empty string, use current active document\r\n sourceDoc = app.activeDocument();\r\n }\r\n var newFile = new File(targetFile) // newly saved file\r\n\r\n switch(exportType){\r\n case 'png':\r\n if (ExportOpts == null) {\r\n var ExportOpts = new ExportOptionsPNG24()\r\n ExportOpts.antiAliasing = true;\r\n ExportOpts.transparency = true;\r\n ExportOpts.saveAsHTML = true;\r\n }\r\n // Export as PNG\r\n sourceDoc.exportFile(newFile, ExportType.PNG24, ExportOpts);\r\n case 'tiff':\r\n if (ExportOpts == null) {\r\n var ExportOpts = new ExportOptionsTIFF();\r\n ExportOpts.resolution = 600;\r\n ExportOpts.byteOrder = TIFFByteOrder.IBMPC;\r\n ExportOpts.IZWCompression = false;\r\n ExportOpts.antiAliasing = true\r\n }\r\n sourceDoc.exportFile(newFile, ExportType.TIFF, ExportOpts);\r\n case 'svg':\r\n if (ExportOpts == null) {\r\n var ExportOpts = new ExportOptionsSVG();\r\n ExportOpts.embedRasterImages = true;\r\n ExportOpts.embedAllFonts = true;\r\n ExportOpts.fontSubsetting = SVGFontSubsetting.GLYPHSUSED;\r\n }\r\n // Export as SVG\r\n sourceDoc.exportFile(newFile, ExportType.SVG, ExportOpts);\r\n case 'eps':\r\n if (ExportOpts == null) {\r\n var ExportOpts = new EPSSaveOptions(); \r\n ExportOpts.cmykPostScript = true;\r\n ExportOpts.embedAllFonts = true;\r\n ExportOpts.compatibleGradientPrinting = true;\r\n ExportOpts.includeDocumentThumbnails = true;\r\n }\r\n\r\n // Export as EPS\r\n sourceDoc.saveAs(newFile, ExportOpts);\r\n }\r\n // Close the file after saving. Simply save another copy, do not overwrite\r\n sourceDoc.close(SaveOptions.DONOTSAVECHANGES);\r\n}\r\n\r\n// Use the function to convert the files\r\nexportFigures_AI_CS6(sourceFile=\"{format_source_file}\", targetFile=\"{format_target_file}\", exportType=\"eps\", ExportOpts=null)\r\n// exportFigures_AI_CS6(sourceFile=arguments[0], targetFile=arguments[1], exportType=arguments[2])\r\n\"\"\"\r\n\r\n\r\ndef svg2eps_ai(source_file, target_file, \\\r\n illustrator_path=\"D:/Edward/Software/Adobe Illustrator CS6/Support Files/Contents/Windows/Illustrator.exe\",\\\r\n jsx_file_str = jsx_file_str_AI_CS6, DEBUG=False):\r\n \"\"\"Use Adobe Illustrator to convert svg to eps\"\"\"\r\n # Change the strings\r\n jsx_file_str = jsx_file_str.replace('{format_source_file}', source_file)\r\n jsx_file_str = jsx_file_str.replace('{format_target_file}', target_file).replace('\\\\','/')\r\n tmp_f = os.path.abspath(os.path.join(os.path.dirname(target_file), \"tmp.jsx\"))\r\n f = open(tmp_f, 'w')\r\n f.write(jsx_file_str)\r\n f.close()\r\n\r\n # Remove previous target file if already existed\r\n if os.path.isfile(target_file):\r\n os.remove(target_file)\r\n\r\n # subprocess.check_call([illustrator_path, '-run', tmp_f])\r\n cmd = \" \".join(['\"'+illustrator_path+'\"', '-run', '\"'+tmp_f+'\"'])\r\n pro = subprocess.Popen(cmd, stdout=subprocess.PIPE)\r\n # print(pro.stdout)\r\n # continuously check if new files are updated\r\n time.sleep(5.0)\r\n sleep_iter = 5.0\r\n max_sleep_iter = 40\r\n while not os.path.isfile(target_file):\r\n time.sleep(1.0)\r\n sleep_iter = sleep_iter + 1.0\r\n if sleep_iter > max_sleep_iter:\r\n break\r\n\r\n # pro.terminate()\r\n #os.kill(os.getpid(), signal.SIGTERM) # Send the signal to all the process groups\r\n pro.kill()\r\n os.remove(tmp_f)\r\n\r\ndef svg2eps_inkscape(source_file, target_file, \\\r\n inkscape_path='\"D:\\\\Edward\\\\Software\\\\inkscape-0.91-1-win64\\\\inkscape.exe\"'):\r\n \"\"\"Use inkscape to convert svg to eps\"\"\"\r\n # cmd = \"inkscape in.svg -E out.eps --export-ignore-filters --export-ps-level=3\"\r\n cmd = inkscape_path+\" \"+source_file+\" --export-eps=\"+target_file +\" --export-ignore-filters --export-ps-level=3\"\r\n print(cmd) # Problem: text was not kept as text, but converted into paths\r\n pro = subprocess.Popen(cmd, stdout=subprocess.PIPE)\r\n #subprocess.check_call([inkscape_path, source_file, '-E', target_file])\r\n print(pro.stdout)\r\n \r\n#def svg2eps_cloudconvert(source_file, target_file):\r\n# import cloudconvert\r\n# api = cloudconvert.Api('5PGyLT7eAn0yLbnBU3G-7j1JLFWTfcnFUk6x7k_lhuwzioGwqO7bVQ-lJNunsDkrr9fL1JDdjdVog6iDZ31yIw')\r\n# process = api.convert({\"input\": \"upload\",\r\n# \"file\": open('R:/temp.svg', 'rb'),\r\n# \"inputformat\": \"svg\",\r\n# \"outputformat\": \"eps\",\r\n# })\r\n# process.wait()\r\n# process.download()\r\n\r\ndef save_svg2eps(fig, savepath):\r\n if '.eps' in savepath:\r\n savepath = savepath.replace('.eps', '.svg')\r\n fig.savefig(savepath, bbox_inches='tight', dpi=300, transparent=True) # save as svg first\r\n svg2eps_ai(savepath, savepath.replace('.svg', '.eps'))\r\n os.remove(savepath)\r\n \r\n \r\ndef plot_ci_manual(xdata, ydata, x_plot, y_plot, popt, alpha=0.95, ax=None, color=\"#b9cfe7\", edgecolor=\"\", *args, **kwargs):\r\n \"\"\"Return an axes of confidence bands using a simple approach.\r\n\r\n Notes\r\n -----\r\n .. math:: \\left| \\: \\hat{\\mu}_{y|x0} - \\mu_{y|x0} \\: \\right| \\; \\leq \\; T_{n-2}^{.975} \\; \\hat{\\sigma} \\; \\sqrt{\\frac{1}{n}+\\frac{(x_0-\\bar{x})^2}{\\sum_{i=1}^n{(x_i-\\bar{x})^2}}}\r\n .. math:: \\hat{\\sigma} = \\sqrt{\\sum_{i=1}^n{\\frac{(y_i-\\hat{y})^2}{n-2}}}\r\n\r\n References\r\n ----------\r\n .. [1]: M. Duarte. \"Curve fitting,\" Jupyter Notebook.\r\n http://nbviewer.ipython.org/github/demotu/BMC/blob/master/notebooks/CurveFitting.ipynb\r\n\r\n \"\"\"\r\n if ax is None:\r\n ax = plt.gca()\r\n \r\n DF = len(xdata) - len(popt)\r\n y_model = np.polyval(popt, xdata)\r\n resid = ydata - y_model\r\n chi2 = np.sum((resid/y_model)**2)\r\n # chi2_red = chi2/DF\r\n s_err = np.sqrt(np.sum(resid**2)/DF)\r\n \r\n t = stats.t.ppf(alpha, DF)\r\n ci = t*s_err*np.sqrt(1/len(xdata) + (x_plot-np.mean(xdata))**2/np.sum((xdata-np.mean(xdata))**2))\r\n ax.fill_between(x_plot, y_plot+ci, y_plot-ci, color=color, edgecolor=edgecolor, *args, **kwargs)\r\n\r\n return ax\r\n\r\n\r\n\r\n"
},
{
"alpha_fraction": 0.5466732978820801,
"alphanum_fraction": 0.5705064535140991,
"avg_line_length": 29.96825408935547,
"blob_id": "9a793b1764ac0a571590941c910d3be13ef31a37",
"content_id": "dbb961f40553f58e653c17c256fab098e8680fd9",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 2014,
"license_type": "no_license",
"max_line_length": 93,
"num_lines": 63,
"path": "/image_processing/xRemoveStripesVertical.py",
"repo_name": "sapphire008/Python",
"src_encoding": "UTF-8",
"text": "import numpy as np\r\nimport pywt\r\n\r\ndef xRemoveStripesVertical(ima, decNum=8, wname='db42', sigma=8):\r\n \"\"\"\"\r\n Stripe and Ring artifact remover\r\n\r\n nima = xRemoveStripesVertical(ima, decNum, wname, sigma)\r\n\r\n Inputs:\r\n ima: image matrix\r\n decNum: highest decomposition level (L). Default 8.\r\n wname: wavelet type. See WFILTERS.\r\n sigma: damping factor of Gaussian function\r\n g(x_hat, y_hat) = 1 - exp(-y_hat^2 / (2 * sigma^2))\r\n Default 8.\r\n\r\n Output:\r\n nima: filtered image\r\n\r\n From\r\n Beat Munch, Pavel Trtik, Federica Marone, Marco Stampanoni. Stripe and\r\n ring artifact removal with combined wavelet -- Fourier filtering. Optics\r\n Express. 17(10): (2009)\r\n\r\n Suggestion for parameters:\r\n Based on the above cited paper,\r\n For waterfall artifacts (vertical stripes),\r\n decNum>=8, wname='db42', sigma>=8\r\n For ring artifacts\r\n decNum>=5, wname='db30', sigma>=2.4\r\n\r\n \"\"\"\r\n\r\n # Check wavelet\r\n if wname not in pywt.wavelist():\r\n wname = pywt.Wavelet(wname, wfilters(wname))\r\n # wavelet decomposition\r\n Ch = [[]] * decNum # cell(1,decNum)\r\n Cv = [[]] * decNum # cell(1,decNum)\r\n Cd = [[]] * decNum # cell(1,decNum)\r\n for ii in np.arange(0, decNum):\r\n ima,Ch[ii],Cv[ii],Cd[ii] = pywt.dwt2(ima,wname)\r\n\r\n # FFT transform of horizontal frequency bands\r\n for ii in np.arange(0, decNum):\r\n # FFT\r\n fCv = np.fft.fftshift(np.fft.fft(Cv[ii]))\r\n my, mx = np.shape(fCv)\r\n\r\n # damping of vertical stripe information\r\n damp = 1-np.exp(-np.arange(-np.floor(my/2), -np.floor(my/2)+my-1, 1)**2/(2*sigma**2))\r\n fCv = fCv * damp[:,np.newaxis]\r\n\r\n # inverse FFT\r\n Cv[ii]=np.fft.ifft(np.fft.ifftshift(fCv))\r\n\r\n # wavelet reconstruction\r\n nima=ima\r\n for ii in np.arange(decNum, 0, -1):\r\n nima = nima[0:Ch[ii].shape[0], 0:Ch[ii].shape[1]]\r\n nima=pywt.idwt2((nima, (Ch[ii], Cv[ii], Cd[ii])), wname)\r\n return(nima)\r\n"
},
{
"alpha_fraction": 0.584551990032196,
"alphanum_fraction": 0.5974475145339966,
"avg_line_length": 48.40666580200195,
"blob_id": "34e9e310fe849c91406e5eb94b6aa776c0ff3e8f",
"content_id": "bb637280cb4546fffbb38fc971f9bb9ab842d5ec",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 7522,
"license_type": "no_license",
"max_line_length": 126,
"num_lines": 150,
"path": "/ReadNWrite/csv2excel.py",
"repo_name": "sapphire008/Python",
"src_encoding": "UTF-8",
"text": "#! /hsgs/projects/jhyoon1/pkg64/pythonpackages/anaconda/bin/python\n# -*- coding: utf-8 -*-\n\"\"\"\nFunctions that helps summarize csv results.\nMake sure to point PYTHONPACKAGEPATH variable to the correct python package directory\n\nCreated on Thu Jan 30 19:07:21 2014\n\n@author: Edward Cui\n\"\"\"\n\ncsvFileList = ['/hsgs/projects/jhyoon1/midbrain_pilots/mid/analysis/percent_signal_change/SNleft_MEAN.csv',\n '/hsgs/projects/jhyoon1/midbrain_pilots/mid/analysis/percent_signal_change/SNright_MEAN.csv',\n '/hsgs/projects/jhyoon1/midbrain_pilots/mid/analysis/percent_signal_change/STNleft_MEAN.csv',\n '/hsgs/projects/jhyoon1/midbrain_pilots/mid/analysis/percent_signal_change/STNright_MEAN.csv',\n '/hsgs/projects/jhyoon1/midbrain_pilots/mid/analysis/percent_signal_change/RNleft_MEAN.csv',\n '/hsgs/projects/jhyoon1/midbrain_pilots/mid/analysis/percent_signal_change/RNright_MEAN.csv']\nExcelFile = '/hsgs/projects/jhyoon1/midbrain_pilots/mid/analysis/percent_signal_change/summary.xlsx'\nRemoveAppendix = 1; # how many appendix to remove: each underscore is one appendix, counting backwards from the file extension\n\n# directory of the python package\nPYTHONPACKAGEPATH = '/hsgs/projects/jhyoon1/pkg64/pythonpackages/'\n\ndef strfind_vect(s,c):\n \"\"\"\n Find list of indices of a character in a string\n \"\"\"\n return [i for i, letter in enumerate(s) if letter == c]\n \n\ndef excelsummary(worksheet,SummaryRange,SummaryRowOffset=4):\n \"\"\"\n Writes mean, standard deviation, and standard error to the bottom of a sheet\n of results, separated by groups of each row\n excelsummary(GroupIndex,SummaryRowOffset)\n Inputs:\n worksheet: worksheet handl generated by xlswriter\n SummaryRange: Data to be used in the summary. E.g., ['A1:F36','A37:F58'], \n with each element as a group.\n SummaryRowOffset: counting from the last row of the data, how many more\n rows to offset to start writing the summary result.\n Default is 4.\n \"\"\"\n return worksheet;#return updated worksheet\n\ndef excelchart(workbook,worksheet,ChartType,DataRange,ErrorBarRange='',Xlab,Ylab,Title):\n \"\"\"\n Create a chart for excel sheet\n excelchart(ExcelFile,SheetName,ChartType,DataRange,ErrorBarRange)\n Inputs:\n worksheet: worksheet handle generated by xlsxwriter\n ChartType: type of chart. Supports: 'bar' and 'line'\n DataRange: range of data to use. With the format\n [[S1_header,S1_value,G1],[S2_header,S2_value,G2]], where S1 and S2\n are different series/groups of data, and G1/G2 are group names\n E.g., DataRange =[['A1:A8','B1:B8','C'],['A11:A18','B11:B18','SZ']].\n S1 and S2 must have the same length.\n ErrorBarRange: Range of error bars to be applied onto the graph.\n Specify as [E1,E2], whereas E1 and E2 are error bar range\n for each series\n \"\"\"\n # Create a new chart on current sheet\n chart = workbook.add_chart({'type':ChartType});\n # Configure each chart's data series\n for v in range(0,len(DataRange)):\n if ErrorBarRange:\n chart.add_series({'values':worksheet.name+'!'+DataRange[v][2],\n 'categories':worksheet.name+'!'+DataRange[v][1],\n 'name':DataRange[v][3],\n 'y_error_bars':{\n 'type':'custom',\n 'plus_values':worksheet.name+'!'+ErrorBarRange[v],\n 'minus_values':worksheet.name+'!'+ErrorBarRange[v],\n 'end_style':1,\n 'direction':'both'}});\n else:\n chart.add_series({'values':worksheet.name+'!'+DataRange[v][2],\n 'categories':worksheet.name+'!'+DataRange[v][1],\n 'name':DataRange[v][3]});\n # set chart aesthetics \n chart.set_title({'name':worksheet.name+' '+Title});\n chart.set_legend({'position':'right'});\n chart.set_size({'width':720,'height':576});\n chart.set_x_axis({'name':Xlab});\n chart.set_y_axis({'name':Ylab,\n 'major_gridlines':{'visible':False}});\n # Insert the chart\n worksheet = worksheet.insert_chart('A'+str(worksheet.dim_rowmax+2),chart);\n \n return worksheet;#return updated worksheet\n \n\ndef csv2excel(ExcelFile,csvFileList,SummaryRange='',ChartType='',\n RemoveAppendix=0,SummaryRowOffset=4,ErrorBarRange=''):\n \"\"\"\n A function that writes each .csv file to an Excel spreadsheet\n csv2excel(csvFileList, ExcelFile,RemoveAppendix=0)\n Inputs: \n ExcelFile: output Excel spreadsheet to be created\n csvFileList: list of full paths of csv files\n SummaryRange: Data to be used in the summary. E.g., \n ['A1:F36','A37:F58'], \n with each element as a group.\n ChartType: type of chart. Supports: 'bar'and'line'\n RemoveAppendix (optional): how many appendix to remove and to be use \n as sheet names? Each underscore is one appendix. \n Counting backwards starting from the file extension. \n Default is 0.\n SummaryRowOffset(optional): counting from the last row of the data, \n how many more\n rows to offset to start writing the summary result.\n Default is 4.\n ErrorBarRange(optional): Range of error bars to be applied onto \n the graph. Specify in similar fashion as DataRange.\n \"\"\"\n # Set necessary paths\n import sys; import os; import xlsxwriter; import csv;\n global PYTHONPACKAGEPATH;\n sys.path.append(PYTHONPACKAGEPATH);\n # Open an Excel spreadsheet to writ the imported csv files\n workbook = xlsxwriter.Workbook(ExcelFile);\n # Transverse through all the .csv files\n for n in csvFileList:\n (_,SHEETNAME) = os.path.split(n);#get file name\n SHEETNAME = SHEETNAME.replace('.csv','_');#remove .csv file extension \n if RemoveAppendix>0:\n IND =[x-len(SHEETNAME) for x in strfind_vect(SHEETNAME,'_')];# get index\n SHEETNAME = SHEETNAME[:(IND[RemoveAppendix-1])];\n IND = None;#remove variables\n worksheet = None;#clear variables\n worksheet = workbook.add_worksheet(SHEETNAME);# add a new worksheet\n SHEETNAME = None;# remove variables\n r = 1;\n with open(n,'rb') as csvfile:\n sheet = csv.reader(csvfile,delimiter=',');\n for row in sheet:\n worksheet.write_row('A'+str(r),row);#write each row\n r = r+1;#row index \n csvfile.close();#close the .csv file\n row = None; # remove some variables\n # add a summary if specified\n if SummaryRange:\n worksheet = excelsummary(worksheet,SummaryRange,SummaryRowOffset)\n # add a chart if specified\n if ChartType:\n #parse data range for chart insertion\n DataRange = '';\n worksheet = excelchart(workbook,ChartType,DataRange,ErrorBarRange)\n \n workbook.close();#close workbook\n \n \n\n \n\n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n "
},
{
"alpha_fraction": 0.5373067259788513,
"alphanum_fraction": 0.5505265593528748,
"avg_line_length": 30.19565200805664,
"blob_id": "8756d692c0252b756d090efaeeb4d5d9e128154e",
"content_id": "2e2eebbb8d82e5cda7a440b7ced704319fc0d112",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 4463,
"license_type": "no_license",
"max_line_length": 74,
"num_lines": 138,
"path": "/python_tutorials/ThinkPython/practice_notes_8.py",
"repo_name": "sapphire008/Python",
"src_encoding": "UTF-8",
"text": "# Python 3.3.0 Practice Notes\r\n# Day 8: January 19, 2013\r\n\r\n# Card object\r\nclass Card(object):\r\n \"\"\"Represents a standard playing card.\"\"\"\r\n \r\n def __init__(self,suit=0,rank=2):#define default card: Club of 2\r\n \"\"\"Define a Card object\r\n #suit number: Spades -->3\r\n Hearts -->2\r\n Diamonds -->1\r\n Clubs -->0\r\n \"\"\"\r\n self.suit=suit;\r\n self.rank=rank;\r\n \r\n suit_names=['Clubs','Diamonds','Hearts','Spades'];\r\n rank_names=[None,'Ace','2','3','4','5','6','7','8','9',\r\n '10','Jack','Queen','King'];#Shift+Enter to break the line\r\n def __str__(self):#define print\r\n \"\"\"Allows printing\"\"\"\r\n return '%s of %s' %(Card.rank_names[self.rank],\r\n Card.suit_names[self.suit]);\r\n \r\n #enable orderablility of the object\r\n def __lt__(self,other): #less than\r\n return (self.suit,self.rank)<(other.suit,other.rank); \r\n def __le__(self,other): #less than or equal to\r\n return (self.suit,self.rank)<=(other.suit,other.rank); \r\n def __eq__(self,other): #equal\r\n return (self.suit,self.rank)==(other.suit,other.rank); \r\n def __ge__(self,other): #greater than or equal to\r\n return (self.suit,self.rank)>=(other.suit,other.rank); \r\n def __gt__(self,other): #greater than\r\n return (self.suit,self.rank)>=(other.suit,other.rank);\r\n def __ne__(self,other): #not equal to\r\n return (self.suit,self.rank)!=(other.suit,other.rank);\r\n \r\n def keyFunction(self):\r\n return (self.suit,self.rank);\r\n\r\n# Test class Cards\r\n#create a card\r\nqueen_of_diamonds=Card(1,12);\r\n#print(queen_of_diamonds);\r\nking_of_clubs=Card(0,13);\r\nking_of_clubs.compareCards(queen_of_diamonds)\r\n#>>-1\r\nC=Card(0,13);\r\nking_of_clubs.compareCards(C);\r\n##>>0\r\n\r\n# Deck Object\r\nimport random;\r\n\r\nclass Deck(object):\r\n def __init__(self):\r\n \"\"\"Create a 52 card deck card\"\"\"\r\n self.cards=[];#empty list with object Cards\r\n for suit in range(4):#enumerates 0 to 3\r\n for rank in range(1,14):#enumerates 1 to 13\r\n card = Card(suit,rank);\r\n self.cards.append(card);\r\n \r\n def __str__(self):\r\n res=[];\r\n for card in self.cards:\r\n res.append(str(card));#str(card) converts the card to string\r\n #instead of displaying its number\r\n return '\\n'.join(res);\r\n \r\n def pop_card(self):\r\n return self.cards.pop();#remove the last card from the list\r\n #and return it\r\n \r\n def add_card(self,card):\r\n self.cards.append(card);#append a card to the list/deck\r\n \r\n def shuffle(self):\r\n random.shuffle(self.cards);#randomly shuffle the list/deck\r\n \r\n def sort(self, order=1):\r\n \"\"\"sort order-->1:ascending, 0:desending\"\"\"\r\n cardKeys=[];\r\n for card in self.cards:\r\n cardKeys.append((card.suit,card.rank));#get keys of each card\r\n \r\n if order ==1:\r\n cardKeys.sort(reverse=False);\r\n else:\r\n cardKeys.sort(reverse=True);\r\n \r\n del self.cards;#clear cards\r\n self.cards=[];#restart a new deck\r\n for key in cardKeys:\r\n card=Card(key[0],key[1]);#creating new cards\r\n self.cards.append(card);#creating the new deck in order\r\n \r\n\r\n#Test Deck object\r\ndeck = Deck();\r\nprint(deck);\r\n#Ace of Clubs\r\n#2 of Clubs\r\n#...\r\n#10 of Spades\r\n#Jack of Spades\r\n#Queen of Spades\r\n#King of Spades\r\ndeck.shuffle();\r\nprint(deck);\r\ndeck.sort(0);#sort the deck descending order\r\nprint(deck);\r\n\r\n# Hand object and inheritance\r\nclass Hand(Deck):#Hand inherits Deck\r\n \"\"\"Represents a hand of playing cards.\"\"\"\r\n def __init__(self,label=''):\r\n self.cards=[];\r\n self.label=label;\r\n \r\n def move_cards(self,hand,num):\r\n \"\"\"Draw num cards from the one Hand/Deck\r\n and give it to another Hand/Deck\"\"\"\r\n for i in range(num):\r\n hand.add_card(self.pop_card());\r\n \r\n#Test Hand object\r\nhand=Hand('New Hand');\r\n#hand inherits whatever is in deck object\r\ndeck=Deck();#get a new deck\r\ncard=deck.pop_card();#take a card out from the Deck\r\nhand.add_card(card);#adding a card from a Deck to the Hand\r\nprint(hand);\r\n#>>>King of Spades\r\n\r\n#This concludes today's study.\r\n\r\n\r\n\r\n\r\n\r\n\r\n\r\n\r\n\r\n\r\n"
},
{
"alpha_fraction": 0.6488925218582153,
"alphanum_fraction": 0.6554552912712097,
"avg_line_length": 22.87755012512207,
"blob_id": "b6fa7fd9a80e80fc5aab8ce26505080455846ffa",
"content_id": "bd2f05bc4cd51dfb1dbb6b0373cd7220d1e7922e",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 1219,
"license_type": "no_license",
"max_line_length": 160,
"num_lines": 49,
"path": "/generic/bibtex_lowercase_key.py",
"repo_name": "sapphire008/Python",
"src_encoding": "UTF-8",
"text": "# -*- coding: utf-8 -*-\r\n\"\"\"\r\nSpyder Editor\r\n\r\nThis is a temporary script file.\r\n\r\ncorrect bibtex reference keys\r\n\"\"\"\r\n\r\n\r\nfrom optparse import OptionParser\r\nimport os\r\n\r\n# input_file = 'D:/Edward/Documents/Assignments/Case Western Reserve/StrowbridgeLab/Projects/TeA Persistence Cui and Strowbridge 2015/docs/ReferencesBibTex.bib'\r\n\r\nusage = \"usage: %prog [options] input_file\"\r\nparser = OptionParser(usage)\r\nparser.add_option(\"-o\",\"--output\", dest=\"output_file\", help=\"output file. Default overwrite input\", default=None)\r\noptions, args = parser.parse_args()\r\n\r\n# parse input\r\ninput_file = args[0]\r\n\r\n# parse output\r\nif options.output_file is None:\r\n output_file = os.path.join(os.path.dirname(input_file), 'tmp.bib').replace('\\\\','/')\r\nelse:\r\n output_file = options.output_file\r\n\r\n# open the files\r\nfidi = open(input_file, 'r')\r\nfido = open(output_file, 'w')\r\n\r\n# correct the file\r\nfor row in fidi:\r\n if '@' == row[0]:\r\n row = row.lower()\r\n # if '-' in row[0]:\r\n # row.replace('-','')\r\n fido.write(row)\r\n\r\n# close the file\r\nfidi.close()\r\nfido.close()\r\n\r\n# replace old file if necessary\r\nif options.output_file is None:\r\n os.remove(input_file)\r\n os.rename(output_file, input_file)\r\n"
},
{
"alpha_fraction": 0.44312795996665955,
"alphanum_fraction": 0.44628751277923584,
"avg_line_length": 30.625,
"blob_id": "240f1e201e9c9430f6e63d231c8703a2b6e986e2",
"content_id": "2a02ecc0035d8bc575da6ba560a48b13426422b3",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 1266,
"license_type": "no_license",
"max_line_length": 154,
"num_lines": 40,
"path": "/Spikes/spikedetekt2/spikedetekt2/__init__.py",
"repo_name": "sapphire008/Python",
"src_encoding": "UTF-8",
"text": "# -----------------------------------------------------------------------------\n# Imports\n# -----------------------------------------------------------------------------\nimport os\nimport sys\nimport logging\n\nfrom kwiklib.utils import logger as log\nfrom kwiklib.utils import *\nfrom processing import *\nfrom core import *\nfrom kwiklib.dataio import *\n\n\n# -----------------------------------------------------------------------------\n# Module constants\n# -----------------------------------------------------------------------------\n__version__ = '0.3.0'\n\nAPPNAME = 'spikedetekt'\n\nABOUT = \"\"\"Automated spike detection for multichannel electrophysiological data.\n\nThis software was developed by Cyrille Rossant, Shabnam Kadir, Max Hunter, Dan Goodman and Kenneth Harris in the Cortical Processing Laboratory at UCL (http://www.ucl.ac.uk/cortexlab).\"\"\"\n\n\n# -----------------------------------------------------------------------------\n# Loggers\n# -----------------------------------------------------------------------------\nLOGGERS = {}\nlog.LOGGERS = LOGGERS\n# Console logger.\nLOGGER = log.ConsoleLogger(name='{0:s}.console'.format(APPNAME),\n print_caller=False)\nlog.register(LOGGER)\n\nsys.excepthook = log.handle_exception\n\n# Set the logging level.\nlog.set_level(logging.INFO)\n\n"
},
{
"alpha_fraction": 0.5912217497825623,
"alphanum_fraction": 0.5965787768363953,
"avg_line_length": 39.37783432006836,
"blob_id": "520ee41f8d249287e65782a2876e1dca8b0ab5d4",
"content_id": "cec77f53c113d368a956eecd61b6b6830b6d2766",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 32854,
"license_type": "no_license",
"max_line_length": 167,
"num_lines": 794,
"path": "/PySynapse/SynapseQt.py",
"repo_name": "sapphire008/Python",
"src_encoding": "UTF-8",
"text": "# -*- coding: utf-8 -*-\r\n\"\"\"\r\nCreated: Sat Apr 18 21:40:21 2015\r\n\r\nForm implementation generated from reading ui file 'SynapseQt.ui'\r\n\r\n by: PyQt4 UI code generator 4.10.4\r\n\r\nWARNING! All changes made in this file will be lost!\r\n\r\nMain window of Synapse\r\n\r\n@author: Edward\r\n\"\"\"\r\n\r\nimport os\r\nimport sys\r\nimport re\r\nimport numpy as np\r\nfrom pdb import set_trace\r\nimport subprocess\r\nimport pandas as pd\r\n\r\n\r\n# sys.path.append('D:/Edward/Documents/Assignments/Scripts/Python/PySynapse')\r\n# sys.path.append('D:/Edward/Docuemnts/Assignments/Scripts/Python/generic')\r\nfrom util.ImportData import NeuroData, get_cellpath\r\nfrom util.spk_util import *\r\nfrom app.Scope import ScopeWindow\r\nfrom app.Settings import *\r\n\r\nimport sip\r\nsip.setapi('QVariant', 2)\r\n\r\n# Routines for Qt import errors\r\nfrom PyQt5 import QtGui, QtCore, QtWidgets\r\n#from pyqtgraph.Qt import QtGui, QtCore\r\ntry:\r\n from PyQt5.QtCore import QString\r\nexcept ImportError:\r\n QString = str\r\n\r\ntry:\r\n _fromUtf8 = QtCore.QString.fromUtf8\r\nexcept AttributeError:\r\n def _fromUtf8(s):\r\n return s\r\n\r\ntry:\r\n _encoding = QtGui.QApplication.UnicodeUTF8\r\n def _translate(context, text, disambig):\r\n return QtCore.QCoreApplication.translate(context, text, disambig, _encoding)\r\nexcept AttributeError:\r\n def _translate(context, text, disambig):\r\n return QtCore.QCoreApplication.translate(context, text, disambig)\r\n\r\n# Set some global variables\r\n__location__ = os.path.realpath(os.path.join(os.getcwd(), os.path.dirname(__file__)))\r\n__version__ = \"PySynapse 0.4\"\r\n\r\n# Custom helper functions\r\ndef sort_nicely(l):\r\n \"\"\" Sort the given list in the way that humans expect.\"\"\"\r\n convert = lambda text: int(text) if text.isdigit() else text\r\n alphanum_key = lambda key: [ convert(c) for c in re.split('([0-9]+)', key) ]\r\n l.sort( key=alphanum_key )\r\n return l\r\n\r\ndef my_excepthook(type, value, tback):\r\n \"\"\"This helps prevent program crashing upon an uncaught exception\"\"\"\r\n sys.__excepthook__(type, value, tback)\r\n\r\n# Custom File system\r\nclass Node(object):\r\n \"\"\"Reimplement Node object\"\"\"\r\n def __init__(self, name, path=None, parent=None, info=None):\r\n super(Node, self).__init__()\r\n\r\n self.name = name\r\n self.children = []\r\n self.parent = parent\r\n self.info = info\r\n\r\n self.is_dir = False\r\n self.is_sequence = False\r\n self.type = \"\" #drive, directory, file, link, sequence\r\n self.path = path\r\n self.is_traversed = False\r\n\r\n if parent is not None:\r\n parent.add_child(self)\r\n\r\n def add_child(self, child):\r\n self.children.append(child)\r\n child.parent = self\r\n\r\n def insert_child(self, position, child):\r\n if position < 0 or position > self.child_count():\r\n return False\r\n\r\n self.children.insert(position, child)\r\n child.parent = self\r\n\r\n return True\r\n \r\n def remove_child(self, position, child):\r\n if position < 0 or position > self.child_count():\r\n return False\r\n \r\n if child in self.children:\r\n self.children.remove(child)\r\n \r\n return True\r\n \r\n def child(self, row):\r\n return self.children[row]\r\n\r\n def child_count(self):\r\n return(len(self.children))\r\n\r\n def row(self):\r\n if self.parent is not None:\r\n return self.parent.children.index(self)\r\n return(0)\r\n\r\nclass FileSystemTreeModel(QtCore.QAbstractItemModel):\r\n \"\"\"Reimplement custom FileSystemModel\"\"\"\r\n FLAG_DEFAULT = QtCore.Qt.ItemIsEnabled | QtCore.Qt.ItemIsSelectable\r\n\r\n def __init__(self, path=None, parent=None, root='FileName'):\r\n super(FileSystemTreeModel, self).__init__()\r\n self.root = Node(root)\r\n self.parent = parent\r\n self.path = path\r\n if not self.path: # if startup path is not provided\r\n self.initialNode(sys.platform)\r\n else:\r\n self.getChildren(self.path, startup=True)\r\n\r\n def initialNode(self, running_os):\r\n \"\"\"create initial node based on OS\r\n On Windows, list all the drives\r\n On Mac, start at \"/Volumes\"\r\n On Linux, start at \"/\"\r\n \"\"\"\r\n if running_os[0:3] == 'win':\r\n hasLabel = True\r\n try:\r\n drives = subprocess.check_output('wmic logicaldisk get name, volumename', stderr=subprocess.STDOUT, timeout=3)\r\n except:\r\n hasLabel = False\r\n drives = subprocess.check_output('wmic logicaldisk get name', stderr=subprocess.STDOUT)\r\n if not drives: # final check\r\n raise(Exception('Cannot locate drives from wmic logicaldisk'))\r\n drives = drives.decode('utf-8')\r\n drives = drives.split('\\n') # split by lines\r\n for d in drives:\r\n if 'Name' in d or not d:\r\n continue\r\n dpath = re.split('[\\s]+',d)[:-1]\r\n if not dpath or not dpath[0]: # if empty string\r\n continue\r\n if hasLabel:\r\n label = \" \".join(dpath[1:])\r\n dpath = dpath[0]\r\n label += \" ({})\".format(dpath)\r\n else:\r\n cmd = 'wmic volume where \"name=' + \"'{}\\\\\\\\'\".format(dpath) + '\" get label'\r\n try:\r\n label = subprocess.check_output(cmd, stderr=subprocess.STDOUT, timeout=2)\r\n label = label.decode('utf-8')\r\n if \"No Instance\" in label:\r\n label = dpath\r\n else:\r\n label = re.split('[\\s]+', label)[1:-1]\r\n if isinstance(label, list):\r\n label = \" \".join(label)\r\n label += \" ({})\".format(dpath)\r\n except:\r\n label = dpath\r\n\r\n # Modify dpath to include slash\r\n dpath += \"/\"\r\n\r\n node = Node(label, dpath, parent=self.root)\r\n node.is_dir = True\r\n node.type = \"drive\" # drive\r\n elif running_os[0:3] == 'dar' or running_os[0:3] == 'mac':\r\n self.getChildren(\"/Volumes/\", startup=True)\r\n elif running_os[0:3] == 'lin':\r\n self.getChildren(\"/\", startup=True)\r\n else:\r\n self.getChildren(\"/\", startup=True)\r\n print(\"Warning: Unrecognized OS. Starting at '/' directory\")\r\n\r\n def getNode(self, index):\r\n if index.isValid():\r\n return(index.internalPointer())\r\n else:\r\n return(self.root)\r\n\r\n ## - dynamic row insertion starts here\r\n def canFetchMore(self, index):\r\n node = self.getNode(index)\r\n\r\n if node.is_dir and not node.is_traversed:\r\n return(True)\r\n\r\n return(False)\r\n\r\n ## this is where you put custom logic for handling your special nodes\r\n def fetchMore(self, index):\r\n parent = self.getNode(index)\r\n self.ucwd = parent.path\r\n\r\n nodes = self.getChildren(parent.path, startup=False)\r\n\r\n # insert the newly fetched files\r\n self.insertNodes(0, nodes, index)\r\n parent.is_traversed = True\r\n\r\n\r\n def hasChildren(self, index):\r\n node = self.getNode(index)\r\n\r\n if node.is_dir:\r\n return(True)\r\n\r\n return(super(FileSystemTreeModel, self).hasChildren(index))\r\n\r\n def getChildren(self, path, startup=False):\r\n dat_files, other_files, img_files = [], [], []\r\n # first separate files into two categories\r\n for file in os.listdir(path):\r\n if str(os.path.splitext(file)[1]).lower() == '.dat' and re.findall('.S(\\d+).E(\\d+).dat', file):\r\n dat_files.append(file)\r\n elif str(os.path.splitext(file)[1].lower()) == '.img':\r\n img_files.append(file)\r\n else:\r\n other_files.append(file)\r\n\r\n # Make the sequence for dat files\r\n sequence = self.createSequence(path=path, files=dat_files)\r\n # Make the stack for img files\r\n stack = self.createStack(path=path, files=img_files)\r\n\r\n # insert the nodes\r\n nodes = []\r\n parent = self.root if startup else None\r\n # Sort other files as human expect\r\n other_files = sort_nicely(other_files)\r\n # insert other files first\r\n for file in other_files:\r\n file_path = os.path.join(path, file)\r\n node = Node(file, file_path, parent=parent)\r\n if os.path.isdir(file_path):\r\n node.is_dir = True\r\n node.type = \"directory\" # directory\r\n elif os.path.islink(file_path):\r\n node.type = \"link\"\r\n else:\r\n node.type = \"file\"\r\n\r\n nodes.insert(0, node)\r\n\r\n # insert custom sequence\r\n for s in sequence:\r\n file_path = os.path.join(path, s['Name']+'.{}.dat')\r\n node = Node(\"{} ({:d})\".format(s['Name'], len(s['Dirs'])), file_path, parent=parent, info=s)\r\n node.is_dir = False\r\n node.type = \"sequence\"\r\n nodes.insert(0, node)\r\n\r\n # insert custom stack\r\n for t in stack:\r\n file_path = os.path.join(path, s['Name']+'.{}.IMG')\r\n node = Node(\"{} ({:d})\".format(s['Name'], len(s['Dirs'])), file_path, parent=parent, info=s)\r\n node.is_dir = False\r\n node.type = \"stack\"\r\n node.insert(0, node)\r\n\r\n return(nodes)\r\n\r\n def rowCount(self, parent):\r\n node = self.getNode(parent)\r\n return(node.child_count())\r\n\r\n ## dynamic row insert ends here\r\n def columnCount(self, parent):\r\n return(1)\r\n\r\n def flags(self, index):\r\n return(FileSystemTreeModel.FLAG_DEFAULT)\r\n\r\n def parent(self, index):\r\n node = self.getNode(index)\r\n\r\n parent = node.parent\r\n if parent == self.root:\r\n return(QtCore.QModelIndex())\r\n\r\n return(self.createIndex(parent.row(), 0, parent))\r\n\r\n def index(self, row, column, parent):\r\n node = self.getNode(parent)\r\n\r\n child = node.child(row)\r\n\r\n if not child:\r\n return(QtCore.QModelIndex())\r\n\r\n return(self.createIndex(row, column, child))\r\n\r\n def headerData(self, section, orientation, role):\r\n return(self.root.name)\r\n\r\n def data(self, index, role):\r\n if not index.isValid():\r\n return(None)\r\n\r\n node = index.internalPointer()\r\n\r\n if role == QtCore.Qt.DisplayRole:\r\n return(node.name)\r\n elif role == QtCore.Qt.DecorationRole: # insert icon here\r\n if node.type == 'drive':\r\n iconimg = 'drive.png'\r\n elif node.type == 'directory':\r\n iconimg = 'folder.png'\r\n elif node.type == 'file':\r\n iconimg = 'file.png'\r\n elif node.type == 'sequence':\r\n iconimg = 'activity.png'\r\n elif node.type == 'stack':\r\n iconimg = 'setting.png'\r\n else: # for debugging, should not reach this\r\n raise(TypeError('Unrecognized node type'))\r\n return QtGui.QIcon(QtGui.QPixmap('resources/icons/'+iconimg))\r\n elif role == QtCore.Qt.BackgroundRole: # insert highlight color here\r\n return(QtGui.QBrush(QtCore.Qt.transparent))\r\n else:\r\n return(None)\r\n\r\n def insertNodes(self, position, nodes, parent=QtCore.QModelIndex()):\r\n node = self.getNode(parent)\r\n success = False\r\n\r\n self.beginInsertRows(parent, position, position + len(nodes) - 1)\r\n\r\n for child in nodes:\r\n success = node.insert_child(position, child)\r\n\r\n self.endInsertRows()\r\n\r\n return success\r\n \r\n def refreshNode(self, parent=QtCore.QModelIndex()):\r\n node = self.getNode(parent)\r\n # set_trace()\r\n # Remove old items\r\n self.beginRemoveRows(parent, 0, len(node.children)) \r\n node.children = [] \r\n self.endRemoveRows()\r\n # Add new items\r\n self.fetchMore(parent)\r\n \r\n def fileName(self, index):\r\n return(self.getNode(index))\r\n\r\n def filePath(self, index):\r\n return(os.path.dirname(self.getNode(index)))\r\n\r\n def setRootPath(self, path):\r\n self.path = path\r\n\r\n def createSequence(self, path, files=None):\r\n \"\"\"Extract episode information in order to create a table\r\n Set name of the sequence based on the list of files.\r\n Return True if successfully made the sequence.\"\"\"\r\n if not files:\r\n return([])\r\n Z = ['S%s.E%s'%re.findall('.S(\\d+).E(\\d+).dat', f)[0] for f in files]\r\n Q = [re.split('.S(\\d+).E(\\d+).dat', f)[0] for f in files] # name\r\n # get unique IDs\r\n names, _, inverse, counts = np.unique(Q, return_index=True, return_inverse=True, return_counts=True)\r\n sequence = []\r\n\r\n for n, nm in enumerate(names):\r\n sequence.append({'Name':('%s'%(nm)),\r\n 'Dirs': [os.path.join(path, pp).replace('\\\\','/') for ii, pp in zip(inverse==n, files) if ii],\r\n 'Epi': [zz for ii, zz in zip(inverse==n, Z) if ii],\r\n 'Time':[],\r\n 'Sampling Rate': [],\r\n 'Duration':[],\r\n 'Drug Level':[],\r\n 'Drug Name': [],\r\n 'Drug Time': [],\r\n 'Comment': []\r\n })\r\n # load episode info\r\n for d in sequence[n]['Dirs']:\r\n # zData = readDatFile(d, readTraceData = False)\r\n zData = NeuroData(d, old=True, infoOnly=True)\r\n\r\n sequence[n]['Time'].append(zData.Protocol.WCtimeStr)\r\n sequence[n]['Sampling Rate'].append(zData.Protocol.msPerPoint)\r\n sequence[n]['Duration'].append(int(zData.Protocol.sweepWindow))\r\n sequence[n]['Drug Level'].append(zData.Protocol.drug)\r\n sequence[n]['Drug Name'].append(zData.Protocol.drugName)\r\n sequence[n]['Drug Time'].append(zData.Protocol.drugTimeStr)\r\n sequence[n]['Comment'].append(zData.Protocol.stimDesc)\r\n\r\n return(sequence)\r\n\r\n def createStack(self, path, files=None):\r\n \"\"\"For images\"\"\"\r\n return([])\r\n\r\n# Episode Table\r\nclass EpisodeTableModel(QtCore.QAbstractTableModel):\r\n def __init__(self, dataIn=None, parent=None, *args):\r\n super(EpisodeTableModel, self).__init__()\r\n self.datatable = dataIn\r\n self.selectedRow = None\r\n\r\n def update(self, dataIn):\r\n # print('Updating Model')\r\n self.datatable = dataIn # pandas dataframe\r\n # print('Datatable : {0}'.format(self.datatable))\r\n\r\n def rowCount(self, parent=QtCore.QModelIndex()):\r\n return len(self.datatable.index)\r\n\r\n def columnCount(self, parent=QtCore.QModelIndex()):\r\n return len(self.datatable.columns.values)\r\n\r\n def headerData(self, section, orientation, role=QtCore.Qt.DisplayRole):\r\n if role == QtCore.Qt.DisplayRole and orientation == QtCore.Qt.Horizontal:\r\n return self.datatable.columns[section]\r\n return QtCore.QAbstractTableModel.headerData(self, section, orientation, role)\r\n\r\n def data(self, index, role=QtCore.Qt.DisplayRole):\r\n i = index.row()\r\n j = index.column()\r\n if role == QtCore.Qt.DisplayRole:\r\n # return the data got as a string\r\n return '{0}'.format(self.datatable.iat[i, j])\r\n elif role == QtCore.Qt.BackgroundRole:\r\n return QtGui.QBrush(QtCore.Qt.transparent)\r\n else:\r\n return None\r\n\r\n def flags(self, index):\r\n return QtCore.Qt.ItemIsEnabled | QtCore.Qt.ItemIsSelectable\r\n\r\n\r\n# Episode Tableview delegate for selection and highlighting\r\nclass TableviewDelegate(QtWidgets.QItemDelegate):\r\n def __init__(self, parent=None, *args):\r\n QtWidgets.QItemDelegate.__init__(self, parent, *args)\r\n\r\n def paint(self, painter, option, index):\r\n # print('here painter delegates')\r\n painter.save()\r\n # set background color\r\n painter.setPen(QtGui.QPen(QtCore.Qt.NoPen))\r\n if (option.state & QtWidgets.QStyle.State_Selected):\r\n grid_color = QtGui.QColor(31,119,180,225)\r\n text_color = QtCore.Qt.white\r\n else:\r\n grid_color = QtCore.Qt.transparent\r\n text_color = QtCore.Qt.black\r\n\r\n # color the grid\r\n painter.setBrush(QtGui.QBrush(grid_color))\r\n painter.drawRect(option.rect)\r\n\r\n # color the text\r\n painter.setPen(QtGui.QPen(text_color))\r\n value = index.data(QtCore.Qt.DisplayRole)\r\n painter.drawText(option.rect, QtCore.Qt.AlignVCenter |QtCore.Qt.AlignHCenter, value)\r\n\r\n painter.restore()\r\n\r\n# %%\r\nclass Synapse_MainWindow(QtWidgets.QMainWindow):\r\n def __init__(self, parent=None, startpath=None, hideScopeToolbox=True, layout=None):\r\n super(Synapse_MainWindow, self).__init__(parent)\r\n # Set up the GUI window\r\n self.setupUi(self)\r\n # Set the treeview model for directory\r\n self.setDataBrowserTreeView(startpath=startpath)\r\n self.hideScopeToolbox = hideScopeToolbox\r\n self.scopeLayout = layout\r\n self.startpath=startpath\r\n\r\n def setupUi(self, MainWindow):\r\n \"\"\"This function is converted from the .ui file from the designer\"\"\"\r\n # Set up basic layout of the main window\r\n MainWindow.setObjectName(_fromUtf8(\"Synpase TreeView\"))\r\n MainWindow.resize(1000, 500)\r\n self.centralwidget = QtWidgets.QWidget(MainWindow)\r\n self.centralwidget.setObjectName(_fromUtf8(\"centralwidget\"))\r\n self.horizontalLayout = QtWidgets.QHBoxLayout(self.centralwidget)\r\n self.horizontalLayout.setObjectName(_fromUtf8(\"horizontalLayout\"))\r\n\r\n # Set splitter for two panels\r\n self.splitter = QtWidgets.QSplitter(self.centralwidget)\r\n sizePolicy = QtWidgets.QSizePolicy(QtWidgets.QSizePolicy.Expanding, QtWidgets.QSizePolicy.Preferred)\r\n sizePolicy.setHorizontalStretch(0)\r\n sizePolicy.setVerticalStretch(0)\r\n sizePolicy.setHeightForWidth(self.splitter.sizePolicy().hasHeightForWidth())\r\n self.splitter.setSizePolicy(sizePolicy)\r\n self.splitter.setOrientation(QtCore.Qt.Horizontal)\r\n self.splitter.setObjectName(_fromUtf8(\"splitter\"))\r\n\r\n # Set treeview\r\n self.treeview = QtWidgets.QTreeView(self.splitter)\r\n sizePolicy = QtWidgets.QSizePolicy(QtWidgets.QSizePolicy.Preferred, QtWidgets.QSizePolicy.Expanding)\r\n sizePolicy.setHorizontalStretch(1)\r\n sizePolicy.setVerticalStretch(0)\r\n sizePolicy.setHeightForWidth(self.treeview.sizePolicy().hasHeightForWidth())\r\n self.treeview.setSizePolicy(sizePolicy)\r\n # self.treeview.setTextElideMode(QtCore.Qt.ElideNone)\r\n self.treeview.header().setResizeMode(QtWidgets.QHeaderView.ResizeToContents)\r\n self.treeview.header().setStretchLastSection(False)\r\n self.treeview.setObjectName(_fromUtf8(\"treeview\"))\r\n\r\n # Set up Episode list table view\r\n self.tableview = QtWidgets.QTableView(self.splitter)\r\n sizePolicy = QtWidgets.QSizePolicy(QtWidgets.QSizePolicy.Expanding, QtWidgets.QSizePolicy.Expanding)\r\n sizePolicy.setHorizontalStretch(3)\r\n sizePolicy.setVerticalStretch(0)\r\n sizePolicy.setHeightForWidth(self.tableview.sizePolicy().hasHeightForWidth())\r\n self.tableview.setSizePolicy(sizePolicy)\r\n self.tableview.setObjectName(_fromUtf8(\"tableview\"))\r\n # additional tableview customizations\r\n self.tableview.setSelectionMode(QtWidgets.QAbstractItemView.ExtendedSelection)\r\n self.tableview.setSelectionBehavior(QtWidgets.QAbstractItemView.SelectRows)\r\n self.tableview.setItemDelegate(TableviewDelegate(self.tableview))\r\n self.tableview.horizontalHeader().setStretchLastSection(True)\r\n # self.tableview.setShowGrid(False)\r\n self.tableview.setStyleSheet(\"\"\"QTableView{border : 20px solid white}\"\"\")\r\n self.horizontalLayout.addWidget(self.splitter)\r\n MainWindow.setCentralWidget(self.centralwidget)\r\n\r\n # Set up menu bar\r\n self.menubar = QtWidgets.QMenuBar(MainWindow)\r\n self.menubar.setGeometry(QtCore.QRect(0, 0, 638, 100))\r\n self.menubar.setObjectName(_fromUtf8(\"menubar\"))\r\n self.setMenuBarItems() # call function to set menubar\r\n MainWindow.setMenuBar(self.menubar)\r\n\r\n # Set up status bar\r\n self.statusbar = QtWidgets.QStatusBar(MainWindow)\r\n self.statusbar.setObjectName(_fromUtf8(\"statusbar\"))\r\n MainWindow.setStatusBar(self.statusbar)\r\n\r\n # Execution\r\n self.retranslateUi(MainWindow)\r\n QtCore.QMetaObject.connectSlotsByName(MainWindow)\r\n\r\n # ---------------- Additional main window behaviors -----------------------\r\n def setMenuBarItems(self):\r\n # File Menu\r\n fileMenu = self.menubar.addMenu('&File')\r\n\r\n # File: Load csv\r\n loadDBAction = QtWidgets.QAction('Load Database', self)\r\n loadDBAction.setStatusTip('Load a database table from a .csv, .xlsx, or .xls file')\r\n loadDBAction.triggered.connect(self.loadDatabase)\r\n fileMenu.addAction(loadDBAction)\r\n \r\n # File: Refresh. Refresh currently selected item/directory\r\n refreshAction = QtWidgets.QAction('Refresh', self)\r\n refreshAction.setShortcut('F5')\r\n refreshAction.setStatusTip('Refresh currently selected item / directory')\r\n refreshAction.triggered.connect(self.refreshCurrentBranch)\r\n fileMenu.addAction(refreshAction)\r\n \r\n # File: Settings\r\n settingsAction = QtWidgets.QAction(\"Settings\", self)\r\n settingsAction.setStatusTip('Configure settings of PySynapse')\r\n settingsAction.triggered.connect(self.openSettingsWindow)\r\n fileMenu.addAction(settingsAction)\r\n \r\n # File: Exit\r\n exitAction = QtWidgets.QAction(QtGui.QIcon('exit.png'),'Exit', self)\r\n exitAction.setShortcut('Ctrl+Q')\r\n exitAction.setStatusTip('Exit Synapse')\r\n exitAction.triggered.connect(self.close)\r\n fileMenu.addAction(exitAction)\r\n \r\n # View Menu\r\n viewMenu = self.menubar.addMenu('&View')\r\n\r\n # View: Column\r\n columnMenu = viewMenu.addMenu('&Additional Columns')\r\n drugNameAction = QtWidgets.QAction('Drug Name', self, checkable=True, checked=False)\r\n drugNameAction.triggered.connect(lambda: self.toggleTableViewColumnAction(4, drugNameAction))\r\n columnMenu.addAction(drugNameAction)\r\n\r\n drugTimeAction = QtWidgets.QAction('Drug Time', self, checkable=True, checked=False)\r\n drugTimeAction.triggered.connect(lambda: self.toggleTableViewColumnAction(5, drugTimeAction))\r\n columnMenu.addAction(drugTimeAction)\r\n\r\n dirsAction = QtWidgets.QAction('Directory', self, checkable=True, checked=False)\r\n dirsAction.triggered.connect(lambda: self.toggleTableViewColumnAction(7, dirsAction))\r\n columnMenu.addAction(dirsAction)\r\n\r\n def toggleTableViewColumnAction(self, column, action):\r\n if self.tableview.isColumnHidden(column):\r\n self.tableview.showColumn(column)\r\n action.setChecked(True)\r\n self.tableview.hiddenColumnList.remove(column)\r\n else:\r\n self.tableview.hideColumn(column)\r\n action.setChecked(False)\r\n self.tableview.hiddenColumnList.append(column)\r\n\r\n def loadDatabase(self):\r\n # TODO: Need to design this more carefully\r\n #raise(NotImplementedError())\r\n # Opens up the file explorer\r\n filename, _ = QtWidgets.QFileDialog.getOpenFileName(self, 'Open File', '/', 'Spreadsheet (*.csv *.xlsx *.xls);;All Files (*)')#\r\n rename_dict = {\"Cell\":\"Name\", \"Episode\":\"Epi\", \"SweepWindow\":\"Duration\",\"Drug\":\"Drug Name\",\"DrugTime\":\"Drug Time\",\"WCTime\":\"Time\", \"StimDescription\":\"Comment\"}\r\n if \".csv\" in filename:\r\n df = pd.read_csv(filename)\r\n elif \".xlsx\" in filename or \"xls\" in filename:\r\n df = pd.read_excel(filename)\r\n else:\r\n return\r\n col_lower = [c.lower() for c in df.columns.tolist()]\r\n if \"show\" in col_lower:\r\n df = df.loc[df.iloc[:, col_lower.index(\"show\")],:]\r\n drop_columns = np.setdiff1d(df.columns.tolist(), list(rename_dict.keys()))\r\n df = df.drop(drop_columns, axis=1).rename(columns=rename_dict)\r\n df[\"Sampling Rate\"] = 0.1\r\n df[\"Drug Level\"] = 0\r\n df.loc[df[\"Drug Name\"].isnull(), \"Drug Name\"] = \"\"\r\n df[\"Time\"] = [NeuroData.epiTime(ttt) for ttt in df[\"Time\"]]\r\n df[\"Drug Time\"] = [NeuroData.epiTime(ttt) for ttt in df[\"Drug Time\"]]\r\n # TODO: Tentitative path\r\n df[\"Dirs\"] = [os.path.join(self.startpath, get_cellpath(cb, ep)).replace(\"\\\\\", \"/\") for cb, ep in zip(df[\"Name\"], df[\"Epi\"])]\r\n self.tableview.sequence = df.reset_index(drop=True).to_dict('list')\r\n df = df.reindex([\"Name\", \"Epi\", \"Time\", \"Duration\", \"Drug Name\", \"Drug Time\", \"Comment\"], axis=1) # drop columns not to be displayed\r\n # print('loaded')\r\n # Populate the loaded data unto the table widget\r\n self.tableview.headers = df.columns.tolist()\r\n self.tableview.model = EpisodeTableModel(df)\r\n self.tableview.setModel(self.tableview.model)\r\n self.tableview.verticalHeader().hide()\r\n # Show all columns\r\n for cc in range(len(self.tableview.headers)):\r\n self.tableview.showColumn(cc)\r\n\r\n self.tableview.selectionModel().selectionChanged.connect(self.onItemSelected)\r\n\r\n def refreshCurrentBranch(self):\r\n # Get parent index\r\n index = self.treeview.selectionModel().currentIndex()\r\n node = self.treeview.model.getNode(index)\r\n if node.type == \"directory\":\r\n self.treeview.model.refreshNode(index)\r\n \r\n def openSettingsWindow(self):\r\n if not hasattr(self, 'settingsWidget'):\r\n self.settingsWidget = Settings()\r\n if self.settingsWidget.isclosed:\r\n self.settingsWidget.show()\r\n self.settingsWidget.isclosed = False\r\n \r\n def closeEvent(self, event):\r\n \"\"\"Override default behavior when closing the main window\"\"\"\r\n return\r\n #quit_msg = \"Are you sure you want to exit the program?\"\r\n #reply = QtWidgets.QMessageBox.question(self, 'Message', quit_msg,\r\n # QtWidgets.QMessageBox.Yes,\r\n # QtWidgets.QMessageBox.No)\r\n #if reply == QtWidgets.QMessageBox.Yes:\r\n # event.accept()\r\n #else:\r\n # event.ignore()\r\n # Consider if close children windows when closing Synapse main window\r\n # children = ['settingsWidget', 'sw']\r\n # for c in children:\r\n # if hasattr(self, c):\r\n # getattr(self, c).close()\r\n \r\n def retranslateUi(self, MainWindow):\r\n \"\"\"Set window title and other miscellaneous\"\"\"\r\n MainWindow.setWindowTitle(_translate(__version__, __version__, None))\r\n MainWindow.setWindowIcon(QtGui.QIcon('resources/icons/Synapse.png'))\r\n\r\n # ---------------- Data browser behaviors ---------------------------------\r\n def setDataBrowserTreeView(self, startpath=None):\r\n # Set file system as model of the tree view\r\n # self.treeview.model = QtWidgets.QFileSystemModel()\r\n self.treeview.model = FileSystemTreeModel(path=startpath)\r\n self.treeview.setModel(self.treeview.model)\r\n # Set behavior upon clicked\r\n self.treeview.clicked.connect(self.onSequenceClicked)\r\n\r\n @QtCore.pyqtSlot(QtCore.QModelIndex)\r\n def onSequenceClicked(self, index):\r\n \"\"\" Display a list of episodes upon sequence clicked\"\"\"\r\n #indexItem = self.treeview.model.index(index.row(), 0, index.parent())\r\n self.raise_()\r\n node = self.treeview.model.getNode(index)\r\n # Check if the item clicked is sequence instead of a folder / file\r\n if node.type == \"sequence\":\r\n # populate the table view on the other panel\r\n self.setEpisodeListTableView(node.info)\r\n \r\n # --------------- Episode list behaviors ----------------------------------\r\n def setEpisodeListTableView(self, sequence=None):\r\n if not sequence:\r\n return # do nothing if there is no sequence information\r\n self.tableview.headers = ['Epi', 'Time', 'Duration', 'Drug Level', 'Drug Name', 'Drug Time', 'Comment','Dirs', 'Stimulus', 'StimDuration']\r\n self.tableview.hiddenColumnList = [4, 5, 7, 8, 9] # Drug Name, Drug Time, Dirs\r\n # Render the data frame from sequence\r\n df = pd.DataFrame.from_dict(sequence)\r\n # sort the data frame by 'Epi' column\r\n epi_sort = df['Epi'].tolist()\r\n ind = pd.DataFrame([[int(k) for k in re.findall('\\d+', m)] \\\r\n for m in epi_sort])\r\n ind = ind.sort_values([0,1], ascending=[1,1]).index.tolist()\r\n df = df.reindex(ind, axis=0)\r\n self.tableview.sequence = df.reset_index(drop=True).to_dict('list') # data information\r\n # self.tableview.sequence['Name'] = self.tableview.sequence['Name'][0] # remove any duplication\r\n # get the subset of columns based on column settings\r\n df = df.reindex(self.tableview.headers, axis=1)\r\n self.tableview.model = EpisodeTableModel(df)\r\n self.tableview.setModel(self.tableview.model)\r\n self.tableview.verticalHeader().hide()\r\n # Hide some columns from display\r\n for c in self.tableview.hiddenColumnList: # Drug Name, Drug Time, Dirs\r\n self.tableview.setColumnHidden(c, True)\r\n # Set behavior upon selection\r\n self.tableview.selectionModel().selectionChanged.connect(self.onItemSelected)\r\n # self.tableview.clicked.connect(self.onItemSelected)\r\n\r\n @QtCore.pyqtSlot(QtCore.QItemSelection, QtCore.QItemSelection)\r\n def onItemSelected(self, selected, deselected):\r\n \"\"\"Executed when an episode in the tableview is clicked\"\"\"\r\n # Get the information of last selected item\r\n if not selected and not deselected:\r\n return\r\n try:\r\n ind = selected.indexes()[-1].row()\r\n except:\r\n ind = deselected.indexes()[-1].row()\r\n sequence = self.tableview.sequence\r\n drugName = sequence['Drug Name'][ind]\r\n if not drugName: # in case of empty string\r\n drugName = str(sequence['Drug Level'][ind])\r\n ep_info_str = \"ts: {:0.1f} ms; Drug: {} ({})\".format(sequence['Sampling Rate'][ind], drugName, sequence['Drug Time'][ind])\r\n self.statusBar().showMessage(ep_info_str)\r\n self.setWindowTitle(\"{} {}\".format(__version__, sequence['Dirs'][ind]))\r\n # Get selected row\r\n indexes = self.tableview.selectionModel().selectedRows()\r\n rows = [index.row() for index in sorted(indexes)]\r\n # if not rows: # When nothing is selected, keep the last selected item on the Scope\r\n # return\r\n # Call scope window\r\n if not hasattr(self, 'sw'): # Start up a new window\r\n # self.sw = ScopeWindow(parent=self)\r\n self.sw = ScopeWindow(partner=self, hideDock=self.hideScopeToolbox, layout=self.scopeLayout) # new window\r\n if self.sw.isclosed:\r\n self.sw.show()\r\n self.sw.isclosed = False\r\n # update existing window\r\n self.sw.updateEpisodes(episodes=sequence, index=rows)\r\n\r\n\r\nif __name__ == '__main__':\r\n sys.excepthook = my_excepthook # helps prevent uncaught exception crashing the GUI\r\n app = QtWidgets.QApplication(sys.argv)\r\n running_os = sys.platform[:3]\r\n # w = Synapse_MainWindow()\r\n if running_os == 'win':\r\n w = Synapse_MainWindow(startpath='D:/Data/Traces', hideScopeToolbox=False)\r\n elif running_os == 'dar':\r\n w = Synapse_MainWindow(startpath='/Users/edward/Data/Traces', hideScopeToolbox=False)\r\n\r\n # w = Synapse_MainWindow(startpath='D:/Data/Traces/2017', hideScopeToolbox=False, layout=[['Current', 'A', 1, 0], ['Stimulus', 'A', 1,0]])\r\n # w = Synapse_MainWindow(startpath='D:/Data/Traces/2016/11.November/Data 9 Nov 2016', hideScopeToolbox=False)\r\n # w = Synapse_MainWindow(startpath='D:/Data/Traces/2017/06.June/Data 30 Jun 2017', hideScopeToolbox=False)\r\n # w = Synapse_MainWindow(startpath='D:/Data/Traces/2017/08.August/Data 8 Aug 2017') # voltage clamp\r\n w.show()\r\n # Connect upon closin\r\n # app.aboutToQuit.connect(restartpyshell)\r\n # Make sure the app stays on the screen\r\n sys.exit(app.exec_())\r\n"
},
{
"alpha_fraction": 0.6516697406768799,
"alphanum_fraction": 0.6669758558273315,
"avg_line_length": 33.59358215332031,
"blob_id": "67332d2175918a8f11b547d2092f5fba65741eee",
"content_id": "14e5d72f5c9784919dce336ef1608a3612e44a0d",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 6468,
"license_type": "no_license",
"max_line_length": 80,
"num_lines": 187,
"path": "/python_tutorials/practice_notes_4.py",
"repo_name": "sapphire008/Python",
"src_encoding": "UTF-8",
"text": "# Python 3.3.0 Practice Notes\n# Day 4: December 23, 2012\n\n# Difference between a list and a dictionary\n# In Python, the indices of a list must be integers;\n# However, in a dictionary, the indices can be almost any type\n# lists use square bracket '[]',\n# whereas dictionaries use curly braces '{}'\n\n# Build a dictionary that maps English words to Spanish words\neng2sp = dict();#create a new empty dictionary\nprint(eng2sp);\n#>>>{} #empty braces/dictionary\n#add elements to the dictionary\neng2sp['one']='uno'; #format: var[key]=value\neng2sp['two']='dos';\neng2sp['three']='tres';\nprint(eng2sp);\n#>>>{'two': 'dos', 'three': 'tres', 'one': 'uno'}\n#note the order is messed up and unpredictable somehow\n#however, this does not create a problem, because\n#the elements in the dictionary is not indexed by order or\n#conventional intergers, but in this case, by the string\nprint(eng2sp['four']);\n#>>>KeyError: 'four' #we cannot index something whose key does not exist\nlen(eng2sp);#find the length of the dictionary\n#>>>3\n'one' in eng2sp #check if a key 'one' exists in the variable\n#>>>True\n'uno' in eng2sp #check if a key 'uno' exists in the variable\n#>>>False\n#to find if a value exists, we have to get the values as a list first\nvals=eng2sp.values();#get only the values of the dictionary\n'uno' in vals;#check if 'uno' is in the value\n#>>>True\n#similarly, to convert a dictionary to only keys, use \"keys()\" method\nword_keys=eng2sp.keys();\nprint(word_keys);\n#>>>dict_keys(['two', 'three', 'one']) #<class 'dict_keys', not 'list'\n#in general, when we have a large number of items, dictionary is faster than\n#lists, as lists use search algorithm, whereas dictionaries use a hashtable\n#algorithm (not sure what this means)\n\n#Use dictionary as a set of counters\ndef histogram(s):\n d = dict(); #create an empty dictionary\n for c in s: #for each element of c in string s\n if c not in d:#if c is not in the dictionary d\n d[c]=1;#take a note of c the key, and count as 1, the first time\n else:\n d[c]+=1;#take a note of c the key, and increase the count\n return d;\n\n#now use this function to count the number of each letter in a word\nword_example='brontosaurus';\nh=histogram(word_example);\nprint(h);\n#>>>{'n': 1, 'o': 2, 'b': 1, 't': 1, 'u': 2, 'a': 1, 'r': 2, 's': 2}\n#again, the returned key is very much random\nh.get('f');#get the value with key 'f', otherwise, return 'None' (default)\n#>>>0\nh.get('o',-1);#get the value with key 'o', otherwise, return '-1'\n#>>>2\n#We may use get() method to redefine histogram more concisely\ndef histogram2(s):\n d = dict();\n for c in s:\n d[c]=d.get(c,0)+1;\n #reasoning:\n #if c does not exist in the dictionary, return 0+1=1;\n #if c already exists in the dictionary, return its current_value+1\n return d;\n\nh2=histogram2(word_example);\nprint(h2);\n#>>>{'n': 1, 'o': 2, 'b': 1, 't': 1, 'u': 2, 'a': 1, 'r': 2, 's': 2}\n\n#sort by keys and print dictionary by alphabetical order\ndef print_dict(d): #void function\n keys_only=list(d.keys());#get the keys as a list\n keys_only.sort();#sort the keys alphabetically\n for e in keys_only:\n print(e,d[e]);\n \nprint_dict(h2);\n#>>>print_dict(h2)\n#a 1\n#b 1\n#n 1\n#o 2\n#r 2\n#s 2\n#t 1\n#u 2\n\n# Reverse lookup\ndef reverse_lookup(d,v): #reverse look up a value v in dictionary d\n for e in d:\n if d[e]==v:\n return e;\n raise ValueError('value does not appear in the dictionary');\n #if eventually there is nothing to return\n #give out an error message 'ValueError:...'\n\n#successful reverse lookup\nreverse_lookup(h2,2);\n#>>>'o'\n#failed reverse lookup\nreverse_lookup(h2,5);\n#Traceback (most recent call last):\n# File \"<console>\", line 0, in <module>\n# File \"<console>\", line 0, in reverse_lookup\n#ValueError: value does not appear in the dictionary\n\n\n# List can be values in a dictionary, but cannot be keys\n# Inverting Dictionary\ndef invert_dict(d):\n d_inverse=dict();\n for k in d: #get the key from d\n val = d[k];#temporarily store the value at key\n if val not in d_inverse:\n d_inverse[val]=[k];#store the key (new value) as a list, since there\n #keys with the same value\n else:\n d_inverse[val].append(k);\n return d_inverse;\n\n#example\nh3=histogram2('parrots');\nprint(h3);\n#>>>\nh3_inverse=invert_dict(h3);\nprint(h3_inverse);\n\n# Definition: A 'hash' is a function that takes a value (of any type) and\n# returns as an integer. Dictionaries use these integers, called hash values, to\n# store and look up key-value pairs. Therefore, keys must be immutable, and\n# since list is mutable, it cannot be keys.\n\n#The following is a more concise version of invert_dict\ndef invert_dict2(d):\n d_inverse=dict();\n for k in d: #get the key from d\n val = d[k];#temporarily store the value at key\n d_inverse.setdefault(val,[]).append(k);\n #reasoning: very similar to histogram2\n # if key 'val' does not exist in the inverted dictionary\n # return [].append(k) to start a new key at 'val', and assign value 'k'\n # if val already eixsts\n # append another value 'k' at key 'val'\n return d_inverse;\n\n# Memo\nknown={0:0,1:1};#dictionary of first two fibonacci numbers\ndef fibonacci(n):\n if n in known:#if the number is already known\n return known[n];#reutrn from the memo\n res=fibonacci(n-1)+fibonacci(n-2);#if not, recalculate\n known[n]=res;#store the new calculation in the memo\n return res;#return calculated\n\n# Global vs. Local variables\nbeen_called = False; #This is a global variable, belong to __main__ frame\ndef example_1():\n been_called = True; #This creates a new local variable of the same name\n\ndef example_2():\n global been_called;#This will call the global variable declared previously\n been_called=True; #now, we can reassign the global variable\n \n#These variables mentioned above are immutable variables, however, if the\n#variable is mutable, then we can reassign the values without redeclaring\nknown = {0:0,1:1};\ndef example_3():\n known[2]=1;\n\n#However, to reassign the entire variable, we have to redeclare it\ndef example_4():\n global known;\n known = dict();\n \n# Long integer in Python 2\n# if an integer is very long, it is stored as type 'long' instead of type 'int'.\n# this only happens in Python 2, as Python 3 stores long integers as type 'int'.\n\n#This concludes today's study."
},
{
"alpha_fraction": 0.6109028458595276,
"alphanum_fraction": 0.6443408727645874,
"avg_line_length": 28.27914047241211,
"blob_id": "86632a65cc2d03cb0933dcb4f052613b386eb813",
"content_id": "60e02afd2e31662852a0fa46402694a6b278245a",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 9869,
"license_type": "no_license",
"max_line_length": 80,
"num_lines": 326,
"path": "/python_tutorials/ThinkPython/practice_notes_7.py",
"repo_name": "sapphire008/Python",
"src_encoding": "UTF-8",
"text": "# Python 3.3.0 Practice Notes\r\n# Day 7: January 4, 2013\r\n\r\n##############################################################################\r\n# Class and Object: user defined types\r\nclass Point(object): #notice that \"class\" and \"object\" are keywords\r\n \"\"\"Represents a point in 2D space.\"\"\" #annotation\r\n pass;\r\n \r\nprint(Point);\r\n#>>><class '__main__.Point'>\r\n\r\n#an instance of a class\r\nblank=Point();\r\nprint(blank);\r\n#>>><__main__.Point object at 0x0000000003130860>\r\n\r\n#Assigning attritubes to the class:\r\n#This is very similar to structures in MATLAB\r\n#the following assign x and y attribute (in MATLAB, fields) to instance blank\r\nblank.x=3.0;\r\nblank.y=4.0;\r\nprint(\"X-coordinate:\",blank.x);\r\nprint(\"Y-coordinate:\",blank.y);\r\n#>>>X-coordinate: 3.0\r\n#>>>Y-coordinate: 4.0\r\n#we may also do this:\r\nprint('(%g,%g)' %(blank.x,blank.y));\r\n#>>>(3,4)\r\n#it is also possible to call functions and methods with attributes\r\nimport math;\r\ndistance=math.sqrt(blank.x**2+blank.y**2); #note ** replaces ^ in Python 3\r\nprint(distance);\r\n#>>>5.0;\r\n\r\ndef distance_between_points(p1,p2):\r\n \"\"\"take in two Points objects and calculate their distance\"\"\"\r\n import math;\r\n distance=math.sqrt((p1.x-p2.x)**2+(p1.y-p2.y)**2);\r\n return distance;\r\n\r\npoint_1=Point();\r\npoint_1.x=2.3;\r\npoint_1.y=3.6;\r\npoint_2=Point();\r\npoint_2.x=10.2;\r\npoint_2.y=15.3;\r\n\r\nDIST=distance_between_points(point_1,point_2);\r\nprint(DIST);\r\n#>>>14.117365193264641\r\n\r\ndef print_point(p):\r\n print(\"(%g,%g)\" %(p.x,p.y));\r\n\r\n\r\n# Rectangle, with Points embedded\r\nclass Rectangle(object):\r\n \"\"\"Represents a rectangle.\r\n attributes: width, height, corner.\r\n \"\"\"\r\n pass;\r\n\r\nbox=Rectangle();\r\nbox.width=100.0;\r\nbox.height=200.0;\r\nbox.corner=Point();#Point object is embedded within Rectangle instance\r\nbox.corner.x=0.0;\r\nbox.corner.y=0.0;\r\n\r\n#instance can be a return value\r\ndef find_center(rect):\r\n p=Point();\r\n p.x=rect.corner.x+rect.width/2.0;\r\n p.y=rect.corner.y+rect.height/2.0;\r\n return p;\r\n\r\ncenter=find_center(box);\r\nprint_point(center);\r\n#>>>(50,100)\r\n\r\n#Customized objects are mutable\r\nprint(box.width);\r\n#>>>100.0\r\nbox.width=box.wdith+10;\r\nprint(box.width);\r\n#>>>110.0\r\n\r\n#Since they are mutable, there may be potentially problems wth aliasing\r\n#however, there si a module \"copy\" we can use to duplicate the object\r\nimport copy;\r\n\r\nbox2=copy.copy(box); #shallow copy, which does not copy the embedded elements\r\n\r\nbox is box2;\r\n#>>>False\r\nbox==box2;\r\n#>>>False #because in object, \"==\" operator is the same as \"is\" operator\r\n\r\n#Also, the shallow copy does not copy the embeded objects\r\nbox.corner is box2.corner;\r\n#>>>True\r\n\r\n#to do a deep copy, use copy.deepcopy\r\nbox3=copy.deepcopy(box);\r\nbox.corner is box3.corner;\r\n#>>>False\r\n\r\n#if uncertain what attributes that an object have, use hasattr(object,'attr');\r\nhasattr(box,'x');\r\n#>>>False\r\nhasattr(box,'corner');\r\n#>>>True\r\n\r\n##############################################################################\r\n# Class and Function\r\nclass Time(object):\r\n \"\"\"Represents the time of the day.\r\n attributes: hour, minute, second\r\n \"\"\"\r\n pass;\r\n\r\ntime=Time();\r\ntime.hour=11;\r\ntime.minute=59;\r\ntime.second=30;\r\n\r\ndef print_time(t):\r\n print('%.2d:%.2d:%.2d' %(t.hour,t.minute,t.second));\r\n#note that %.2d prints 2 digits\r\n\r\n#Pure functions and Modifiers:\r\ndef add_time(t1,t2): #pure function\r\n \"\"\"Adding two time\"\"\"\r\n SUM = Time();\r\n SUM.hour=t1.hour+t2.hour;\r\n SUM.minute=t1.minute+t2.minute;\r\n SUM.second=t1.second+t2.second;\r\n return SUM;\r\n\r\n#pure function does not modify any of the objects passed onto its arguments\r\n#in this case, t1 and t2 are not changed at all\r\n\r\n#Test the function\r\nstart=Time();#specifying start time\r\nstart.hour=9;\r\nstart.minute=45;\r\nstart.second=0;\r\n\r\nduration=Time();#specifying duration\r\nduration.hour=1;\r\nduration.minute=35;\r\nduration.second=0;\r\n\r\nendTime=add_time(start,duration);#calculating end time\r\nprint_time(endTime);#print end time\r\n#>>>10:80:00\r\n\r\n#however, this is not what we expected for time in real life, therefore, we\r\n#need modifier functions\r\ndef increment(time,seconds):#a modifer function changes its input\r\n time.second+=seconds; #increase the time by specified seconds\r\n \r\n if time.second>60:#if second greater than 60\r\n time.minute+=time.second//60;#increase minute by quotient\r\n time.second=time.second%60;#find the remainder after dividing 60\r\n \r\n if time.minute>=60:\r\n time.hour+=time.minute//60;\r\n time.minute=time.minute%60;\r\n#we may also invoke a recursion in the function, but it may be less efficient\r\n\r\nincrement(endTime,0);\r\nprint_time(endTime);\r\n#>>>11:20:00\r\n\r\n# Prototype vs. Patch: write, test, and retest to correct errors\r\n#we can either write a pure function that includes all the algorithms,\r\n#or we can create different parts of that function by creating simpler\r\n#individual functions which can be called into another function that\r\n#carries out the goal. This is called planned development, which usually\r\n#involves high-level insights that breaks down the problem.\r\n\r\n##############################################################################\r\n# Class and Method:\r\n#Difference between method and function:\r\n#1). Methods are defined inside a class in order to make the relationship\r\n#between class and the method clear\r\n#2). They syntax for invoking a method is different from the syntax for calling\r\n#a function\r\n\r\n#To create a method inside a class is like create a function, except it is\r\n#under the class object, rather than the __main__\r\nclass Time(object):\r\n def print_time(time):#this first parameter of the method is usually called\r\n #self, so we may use \"self\" instead of \"time\"\r\n print('%.2d:%.2d:%.2d' %(time.hour,time.minute,time.second));\r\n\r\n#Testing\r\nStartTime=Time();\r\nStartTime.hour=2;\r\nStartTime.minute=34;\r\nStartTime.second=31;\r\nTime.print_time(StartTime);#now print_time is a method of Time\r\n#>>>02:34:31\r\n#we can also use method syntax to get the same result\r\nStartTime.print_time();\r\n#>>>02:34:31\r\n#in this case, \"StartTime\" is the subject with method \"print_time\"\r\n\r\n#We now creates several methods for class Time. Note that it is important\r\n#to leave NO empty line between each method, at least in Komodo.\r\ndef int_to_time(seconds):\r\n \"\"\"convert seconds in integer to a time object\"\"\"\r\n time=Time();\r\n minutes,time.second=divmod(seconds,60);\r\n time.hour,time.minute=divmod(minutes,60);\r\n return time;\r\n#the reason not to put this function inside Time as a method: the input is\r\n#an integer, not a Time object.\r\n\r\nclass Time(object):\r\n def print_time(self):\r\n \"\"\"print time object\"\"\"\r\n print('%.2d:%.2d:%.2d' %(self.hour,self.minute,self.second));\r\n def time_to_int(self):\r\n \"\"\"convert a time object to integer\"\"\"\r\n minutes=self.hour*60+self.minute;\r\n seconds=minutes*60+self.second;\r\n return seconds;\r\n def increment(self,seconds):\r\n \"\"\"increase a time object by a specified seconds\"\"\"\r\n seconds+=self.time_to_int();\r\n return int_to_time(seconds);\r\n def is_after(self,other):\r\n \"\"\"check if a time is after another time\"\"\"\r\n return self.time_to_int()>other.time_to_int();\r\n def __init__(self,hour=0,minute=0,second=0):\r\n \"\"\"__init__ method initilize the object with default values\"\"\"\r\n self.hour=hour;\r\n self.minute=minute;\r\n self.second=second;\r\n def __str__(self):\r\n \"\"\"convert the object to a string. This allows the object to be\r\n printed directly using 'print'. \"\"\"\r\n return '%.2d:%.2d:%.2d' %(self.hour,self.minute,self.second);\r\n def add_time(self,other):\r\n \"\"\"allows the addition of two times given\"\"\"\r\n seconds=self.time_to_int()+other.time_to_int();\r\n return int_to_time(seconds);\r\n def __add__(self,other):#this __add__ method checks type of \"other\"\r\n \"\"\"adds time together\"\"\"\r\n if isinstance(other,Time):\r\n return self.add_time(other);\r\n elif isinstance(other,int):\r\n return self.increment(other);\r\n def __radd__(self,other):\r\n \"\"\"gives communitative property of addition to the class object\"\"\"\r\n return self.__add__(other);\r\n\r\n#testing\r\nstart=Time();\r\nstart.hour=1;\r\nstart.minute=32;\r\nstart.second=41;\r\nend=Time();\r\nend.hour=2;\r\nend.minute=34;\r\nend.second=24;\r\nend.is_after(start);#chekc to see if end time is after start time\r\n#>>>True\r\n\r\n#testing __init__ method\r\ntime=Time();\r\ntime.print_time();\r\n#>>>00:00:00\r\ntime=Time(9);\r\ntime.print_time();\r\n#>>>09:00:00\r\ntime=Time(9,30);\r\ntime.print_time();\r\n#>>>09:30:00\r\ntime=Time(9,30,42);\r\ntime.print_time();\r\n#>>>09:30:42\r\n\r\n#testing __str__ method\r\ntime=Time(9,45);\r\nprint(time);#\"print\" invokes \"__str__\" method\r\n#>>>09:45:00\r\n\r\n#testing __add__ method\r\nstart=Time(9,45);\r\nduration=Time(1,35);\r\nprint(start+duration); #the \"+\" should invoke \"__add__\" method\r\n#>>>11:20:00\r\nduration=30;#30 seconds of duration\r\nprint(start+duration);\r\n#>>>09:45:30\r\n#however, the addition is not communitative\r\nprint(duration+start);\r\n#>>>TypeError: unsupported operand type(s) for +: 'int' and 'Time'\r\n#this can be solved using __radd__ or \"right_side add\"\r\n#it is invoked when the Time object is appears on the right side of the\r\n#\"+\" operator\r\n#after adding __radd__ method, try add again\r\nstart=Time(9,45);\r\nduration=30;\r\nprint(duration+start);\r\n#>>>09:45:30;\r\n\r\n# Polymorphism: functions that can work with several types\r\n#for example, sum() is polymorphic and adds up the objects as long as the object\r\n#itself supports addition\r\nt1=Time(7,43);\r\nt2=Time(7,41);\r\nt3=Time(7,37);\r\ntotal=sum([t1,t2,t3]);\r\nprint(total);\r\n#>>>23:01:00\r\n\r\n#Use __dict__ method to print out a dictionary of attributes and values\r\nprint(t1.__dict__);\r\n#>>>{'hour': 7, 'minute': 43, 'second': 0}\r\n\r\n#This concludes today's study."
},
{
"alpha_fraction": 0.6495575308799744,
"alphanum_fraction": 0.6853097081184387,
"avg_line_length": 34.71428680419922,
"blob_id": "7200531e4f1c0620784f43cf9559dbebfc09d482",
"content_id": "09ec7ace2dabb61dabc40534aa6419b02827e433",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 2825,
"license_type": "no_license",
"max_line_length": 106,
"num_lines": 77,
"path": "/python_tutorials/ThinkPython/practice_notes_1.py",
"repo_name": "sapphire008/Python",
"src_encoding": "UTF-8",
"text": "# Python 3.3.0 Practice Notes\r\n# Day 1: November 23, 2012\r\n\r\n# print()\r\n#default delimiter is \\n, which prints at a new line every line of print()\r\nprint('Hello, world!',\"I am okay\");#use single or double quotes are both fine\r\n\r\n#len()\r\nlen(\"asdffg\");#returns the length of the string\r\n\r\n# Converting between letter and integer (ASCII)\r\nord('a'); #--> integer\r\nchr(97); #--> unicode character\r\n\r\n# Concatenation\r\nfirst = 'throat';\r\nsecond = ' warbler';\r\nprint(first + second);\r\n#another example of concatenation\r\nthis_word = 'Spam';\r\nprint(this_word*3);# 'this_word', then, will be repeated 3 times in one string\r\n\r\n# Difference in Division between Python2 and Python3\r\n#In Python 2, / is the floor division,\r\n#whereas in Python 3, // is the floor division. This means, even if one of the number is float\r\n#if we call // in division operation, it is going to perform a floor division first,\r\n#Then convert the result to a float.\r\n#In Python 2, to use float division, we must convert one of the number into floats\r\n#whereas in Python 3, / is the float division\r\n\r\n# Checking the type of a variable / object\r\ntype(32); #--><type 'int'>\r\ntype ('32'); #--><type 'str'>\r\n\r\n# Type Conversion\r\nint('32'); #--> 32 from type str to type int\r\nint(3.99999); #--> 3\r\nint(-2.3333); #--> 2\r\nfloat(2); #-->2.0, from type int to type float\r\nfloat('23.424'); # 23.424, from type str to type float\r\nstr(32.32); #-->'32.32', from type float to type str\r\n\r\n# Math Modules and associated funtions\r\nimport math;#import math modules\r\nprint(math.pi);#returns constant pi\r\nprint(math.e);#returns natural number e\r\nprint(math.log(3,4));#returns log base 4 of 3\r\nprint(math.log10(20.3));#returns log base 10 of 20.3\r\nprint(math.log2(23));#returns log base 2 of 23, more accurate than using log(x,base)\r\nprint(math.exp(3));#returns e to the 3rd power\r\nprint(math.pow(2,3));#returns 2 raised to the 3rd power\r\nprint(math.sqrt(3));#returns square root of 3\r\n#other functions\r\n#math.sin, math.cos, math.tan,\r\n#math.atan2 (returns value in radians between -pi and pi)\r\n#math.degrees(x), math.radians(x)\r\n#For complex number, \"import cmath\" instead of \"import math\"\r\n#use cmath as the name of the module to call out these functions\r\n#We may also do\r\nfrom math import * #import all functions from the math module\r\npi #we now can use the functions from math directly, without typing math. every time\r\n\r\n# Functions\r\nmath_eq1=1+1;\r\nmath_eq2=2+1;\r\nmath_eq3=math.pi;\r\ndef let_it_all_out(a,b,c): #don't forget the colon after the parenthesis (which is for argument inputs)!\r\n print(\"Okay, let's do some math\");\r\n print(a);\r\n print(b);\r\n print(c);\r\n print(\"Good Job!\");\r\n #an empty line to signal the end of the function\r\n#now, call the function\r\nlet_it_all_out(math_eq1,math_eq2,math_eq3);\r\n\r\n#This concludes today's study."
},
{
"alpha_fraction": 0.4377158284187317,
"alphanum_fraction": 0.4795264005661011,
"avg_line_length": 35.19266128540039,
"blob_id": "bf7771bc7ba2bb1373b5c95704027fb53b9f474d",
"content_id": "81acca4a9c4b8e6b5d91e4d60135f45f1fcd06a1",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 8108,
"license_type": "no_license",
"max_line_length": 83,
"num_lines": 218,
"path": "/Plots/simple/ellipsoid.py",
"repo_name": "sapphire008/Python",
"src_encoding": "UTF-8",
"text": "# -*- coding: utf-8 -*-\r\n\"\"\"\r\nCreated on Sun Jul 05 21:07:31 2015\r\n\r\n@author: Edward\r\n\"\"\"\r\n\r\nimport numpy as np\r\nimport matplotlib.pyplot as plt\r\nfrom mpl_toolkits.mplot3d import Axes3D # for plotting 3D\r\n\r\ndef spm_matrix(P, order='T*R*Z*S'):\r\n \"\"\"Python adaptaion of spm_matrix\r\n returns an affine transformation matrix\r\n FORMAT [A, T, R, Z, S] = spm_matrix(P, order)\r\n P(1) - x translation\r\n P(2) - y translation\r\n P(3) - z translation\r\n P(4) - x rotation about - {pitch} (radians)\r\n P(5) - y rotation about - {roll} (radians)\r\n P(6) - z rotation about - {yaw} (radians)\r\n P(7) - x scaling\r\n P(8) - y scaling\r\n P(9) - z scaling\r\n P(10) - x affine\r\n P(11) - y affine\r\n P(12) - z affine\r\n\r\n order (optional) application order of transformations.\r\n\r\n A - affine transformation matrix\r\n ___________________________________________________________________________\r\n\r\n spm_matrix returns a matrix defining an orthogonal linear (translation,\r\n rotation, scaling or affine) transformation given a vector of\r\n parameters (P). By default, the transformations are applied in the\r\n following order (i.e., the opposite to which they are specified):\r\n\r\n 1) shear\r\n 2) scale (zoom)\r\n 3) rotation - yaw, roll & pitch\r\n 4) translation\r\n\r\n This order can be changed by calling spm_matrix with a string as a\r\n second argument. This string may contain any valid MATLAB expression\r\n that returns a 4x4 matrix after evaluation. The special characters 'S',\r\n 'Z', 'R', 'T' can be used to reference the transformations 1)-4)\r\n above. The default order is 'T*R*Z*S', as described above.\r\n\r\n SPM uses a PRE-multiplication format i.e. Y = A*X where X and Y are 4 x n\r\n matrices of n coordinates.\r\n\r\n __________________________________________________________________________\r\n Copyright (C) 2008 Wellcome Trust Centre for Neuroimaging\r\n\r\n Karl Friston\r\n $Id: spm_matrix.m 1149 2008-02-14 14:29:04Z volkmar $\r\n \"\"\"\r\n # pad P with 'null' parameters\r\n #---------------------------------------------------------------------------\r\n q = np.array([0, 0, 0, 0, 0, 0, 1, 1, 1, 0, 0, 0])\r\n P = np.concatenate((P,q[(np.size(P) + 1):12]))\r\n\r\n # Transformation matrices\r\n T = np.array([[1,0,0,P[0]], # translation\r\n [0,1,0,P[1]],\r\n [0,0,1,P[2]],\r\n [0,0,0,1]])\r\n\r\n R1 = np.array([[1, 0, 0, 0],\r\n [0, np.cos(P[3]), np.sin(P[3]), 0],\r\n [0, -np.sin(P[3]), np.cos(P[3]), 0],\r\n [0, 0, 0, 1]])\r\n\r\n R2 = np.array([[np.cos(P[4]), 0, np.sin(P[4]), 0],\r\n [0, 1, 0, 0],\r\n [-np.sin(P[4]), 0, np.cos(P[4]), 0],\r\n [0, 0, 0, 1]])\r\n\r\n R3 = np.array([[np.cos(P[5]), np.sin(P[5]), 0, 0],\r\n [-np.sin(P[5]), np.cos(P[5]), 0, 0],\r\n [0, 0, 1, 0],\r\n [0, 0, 0, 1]])\r\n\r\n R = R1.dot(R2).dot(R3) # rotation\r\n\r\n Z = np.array([[P[6], 0, 0, 0], # scale\r\n [0, P[7], 0, 0],\r\n [0, 0, P[8], 0,],\r\n [0, 0, 0, 1]])\r\n\r\n S = np.array([[1, P[9], P[10], 0], # shear\r\n [0, 1, P[11], 0],\r\n [0, 0, 1, 0],\r\n [0, 0, 0, 1]])\r\n\r\n # order = '('+order.replace('*', ').dot(')+')' # convert to np dot product\r\n # A = eval(order)\r\n v = {'T':T, 'R':R, 'Z':Z, 'S':S}\r\n l = order.split('*')\r\n A = v[l[0]].dot(v[l[1]]).dot(v[l[2]]).dot(v[l[3]])\r\n\r\n def is_numeric(obj): # to check if a numpy object is numeric\r\n attrs = ['__add__', '__sub__', '__mul__', '__div__', '__pow__']\r\n return all(hasattr(obj, attr) for attr in attrs)\r\n\r\n if not is_numeric(A) or np.ndim(A)!=2 or any([s!=4 for s in np.shape(A)]):\r\n raise(IOError(\\\r\n \"Order expression '%s' did not return a valid 4x4 matrix.\"%(order)))\r\n\r\n return(A, T, R, Z, S)\r\n \r\ndef rmat2RPY(rmat):\r\n \"\"\"convert a rotation matrix back to roll, pitch, yaw\r\n http://planning.cs.uiuc.edu/node103.html \r\n \"\"\"\r\n pitch = np.arctan(rmat[1,0]/rmat[0,0]) # x\r\n roll = np.arctan(-rmat[2,0]/np.sqrt(rmat[2,1]**2 + rmat[2,2]**2)) #y\r\n yaw = np.arctan(-rmat[2,1]/rmat[2,2]) #z\r\n return(pitch, roll, yaw)\r\n\r\ndef Ellipsoid(center, radii, rvec=np.eye(3), numgrid=100):\r\n \"\"\"Matrix description of ellipsoid\r\n center: center [x0,y0,z0]\r\n radii: radii of the ellipsoid [rx, ry, rz]\r\n rvec: vector of the radii that indicates orientation. Default identity\r\n numgrid: number of points to estimate the ellipsoid. The higher the\r\n number, the smoother the plot. Defualt 100.\r\n return: x, y, z coordinates\r\n \"\"\"\r\n # Spherical coordinate\r\n u = np.linspace(0.0, 2.0*np.pi, numgrid) # 100 grid resolution\r\n v = np.linspace(0, np.pi, numgrid-10) #100 grid resolution\r\n # Convert to Cartesian\r\n x = radii[0] * np.outer(np.cos(u), np.sin(v))\r\n y = radii[1] * np.outer(np.sin(u), np.sin(v))\r\n z = radii[2] * np.outer(np.ones_like(u), np.cos(v))\r\n X = np.rollaxis(np.array([x,y,z]), 0, 3)\r\n X = X.dot(rvec.T) + center.reshape((1, 1, -1)) # rotation and translation\r\n return(X[:,:,0], X[:,:,1], X[:,:,2])\r\n\r\ndef SimulateEllipsoid(P=None):\r\n \"\"\"Generate a cloud of points within an ellipsoid given parameter P\"\"\"\r\n if P is None:\r\n P = np.array([1,2,3,np.pi/3, np.pi/4, np.pi/6,4,5,6,0,0,0])\r\n A, _, R, _, _ = spm_matrix(P)\r\n # make up data\r\n X = np.concatenate((np.random.randn(1000,3), np.ones((1000,1))),axis=1)\r\n # Transform data\r\n X = X.dot(A.T)\r\n # homogenize data\r\n X = X[:,:-1] / X[:,-1][...,np.newaxis]\r\n # Reverse to extract parameters\r\n # mean\r\n centroid = np.mean(X, axis=0)\r\n # subtract mean\r\n Y = X - centroid\r\n # principle component analysis\r\n U, S, V = np.linalg.svd(1.0/np.sqrt(Y.shape[0])*Y)\r\n # retrieve radii\r\n radii = S # variance = S**2\r\n # retrieve rotation\r\n rvec = V\r\n return(X, centroid, radii, rvec)\r\n\r\ndef P2M(P):\r\n \"\"\"polynomail to matrix form\"\"\"\r\n nd = (np.sqrt(np.size(P)*8+9)-3.0)/2.0\r\n M = np.eye(1.0 + nd)\r\n M = M*np.diag(np.concatenate((P[0:nd],np.array([-1.0]))))*0.5\r\n count = nd\r\n for ind in xrange(int(nd)):\r\n M[(ind+1):-1, ind] = P[count:(count+nd-ind-1)]\r\n count +=nd-ind-1\r\n M[-1,:-1] = P[-nd:]\r\n M = M + M.T\r\n return(M)\r\n\r\ndef M2P(M):\r\n \"\"\"Matrix to polynomial form\"\"\"\r\n P = np.diag(M)[:-1]\r\n for ind in xrange(np.shape(M)[0]):\r\n P = np.concatenate((P, M[(ind+1):-1, ind]), axis=1)\r\n P = np.concatenate((P,M[-1,:-1]), axis=1)\r\n return(P)\r\n\r\nif __name__ == '__main__':\r\n # Start figure\r\n fig = plt.figure()\r\n ax = fig.add_subplot(111,projection='3d')\r\n # simulate data\r\n P = np.array([1,2,3,0, np.pi/4, 0,5,10,15,0,0,0])\r\n #X, _, _, _ = SimulateEllipsoid(P)\r\n X, _center, _radii, _rvec = SimulateEllipsoid(P)\r\n x, y, z = X[:,0], X[:,1], X[:,2]\r\n ax.scatter(x, y, z, color='k', alpha=0.1)\r\n\r\n # calcualte ellipsoid fit\r\n _, center, rvec, radii, _ = spm_matrix(P)\r\n rvec = rvec[:3,:3]\r\n center = center[:3,-1]\r\n radii = np.diag(radii)[:3]\r\n x,y,z = Ellipsoid(center, radii, rvec)\r\n ax.plot_surface(x,y,z, rstride=4, cstride=4, color='b', linewidth=0, alpha=0.7)\r\n\r\n # show principla axes\r\n for n, XYZ in enumerate(rvec.T):\r\n x, y, z = zip(center, XYZ * radii[n] + center)\r\n ax.plot(x, y, z)\r\n\r\n #ax.dist=2\r\n ax.view_init(elev=0, azim=0) # set perspective\r\n #ax.dist = 5\r\n\r\n # label axes\r\n ax.set_xlabel('x')\r\n ax.set_ylabel('y')\r\n ax.set_zlabel('z')\r\n"
},
{
"alpha_fraction": 0.4515981674194336,
"alphanum_fraction": 0.4593607187271118,
"avg_line_length": 27.763158798217773,
"blob_id": "f9242528ccd3c9c4ffeeda67dc46bd7bb21c4238",
"content_id": "d8a4c0674bf39cab3836b83b4cb492e455f6f245",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 2190,
"license_type": "no_license",
"max_line_length": 79,
"num_lines": 76,
"path": "/Spikes/spikedetekt2/spikedetekt2/processing/filtering.py",
"repo_name": "sapphire008/Python",
"src_encoding": "UTF-8",
"text": "\"\"\"Filtering routines.\"\"\"\n\n# -----------------------------------------------------------------------------\n# Imports\n# -----------------------------------------------------------------------------\nimport numpy as np\nfrom scipy import signal\nfrom kwiklib.utils.six.moves import range\n\n\n# -----------------------------------------------------------------------------\n# Signal processing functions\n# -----------------------------------------------------------------------------\ndef bandpass_filter(**prm):\n \"\"\"Bandpass filtering.\"\"\"\n rate = prm['sample_rate']\n order = prm['filter_butter_order']\n low = prm['filter_low']\n high = prm['filter_high']\n return signal.butter(order,\n (low/(rate/2.), high/(rate/2.)),\n 'pass')\n\ndef apply_filter(x, filter=None):\n if x.shape[0] == 0:\n return x\n b, a = filter\n try:\n out_arr = signal.filtfilt(b, a, x, axis=0)\n except TypeError:\n out_arr = np.zeros_like(x)\n for i_ch in range(x.shape[1]):\n out_arr[:, i_ch] = signal.filtfilt(b, a, x[:, i_ch])\n return out_arr\n\ndef decimate(x):\n q = 16\n n = 50\n axis = 0\n\n b = signal.firwin(n + 1, 1. / q, window='hamming')\n a = 1.\n\n y = signal.lfilter(b, a, x, axis=axis)\n\n sl = [slice(None)] * y.ndim\n sl[axis] = slice(n // 2, None, q)\n\n return y[sl]\n\n\n# -----------------------------------------------------------------------------\n# Whitening\n# -----------------------------------------------------------------------------\n\"\"\"\n * Get the first chunk of data\n * Detect spikes the usual way\n * Compute mean on each channel on non-spike data\n * For every pair of channels:\n * estimate the covariance on non-spike data\n * Get the covariance matrix\n * Get its square root C' (sqrtm)\n * Get u*C' + (1-u)*s*Id, where u is a parameter, s the std of non-spike data\n across all channels\n * Option to save or not whitened data in FIL\n * All spike detection is done on whitened data\n\n\"\"\"\ndef get_whitening_matrix(x):\n C = np.cov(x, rowvar=0)\n # TODO\n\ndef whiten(x, matrix=None):\n if matrix is None:\n matrix = get_whitening_matrix(x)\n # TODO\n\n\n\n\n"
},
{
"alpha_fraction": 0.5512328147888184,
"alphanum_fraction": 0.5648192763328552,
"avg_line_length": 49.019287109375,
"blob_id": "38d92e4cfcd5ba868bd5e33a7f60f6bee34a4ace",
"content_id": "791d063212a5a20b98a11b7e6d53544904cba43e",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 84643,
"license_type": "no_license",
"max_line_length": 215,
"num_lines": 1659,
"path": "/PySynapse/app/Toolbox.py",
"repo_name": "sapphire008/Python",
"src_encoding": "UTF-8",
"text": "# -*- coding: utf-8 -*-\r\n\"\"\"\r\nCreated: Fri Apr 7 22:12:21 2017\r\n\r\nSide dock toolbox for Scope window.\r\nMethods that call self.friend assumes that the Scope window is already running (instance created)\r\n\r\n@author: Edward\r\n\"\"\"\r\n\r\n# Global variables\r\nold = True # load old data format\r\n# colors = readini(os.path.join(__location__,'../resources/config.ini'))['colors']\r\nignoreFirstTTL = True # Ignore the first set of TTL Data when parsing TTL pulse protocols\r\n\r\nfrom app.AccordionWidget import AccordionWidget\r\nfrom app.Annotations import *\r\nfrom util.spk_util import *\r\nfrom util.ImportData import NeuroData\r\n\r\nfrom scipy.signal import butter, filtfilt\r\nfrom scipy.optimize import curve_fit\r\n\r\nfrom pdb import set_trace\r\n\r\ntry:\r\n _fromUtf8 = QtCore.QString.fromUtf8\r\nexcept AttributeError:\r\n def _fromUtf8(s):\r\n return s\r\n\r\ntry:\r\n _encoding = QtGui.QApplication.UnicodeUTF8\r\n def _translate(context, text, disambig):\r\n return QtCore.QCoreApplication.translate(context, text, disambig, _encoding)\r\nexcept AttributeError:\r\n def _translate(context, text, disambig):\r\n return QtCore.QCoreApplication.translate(context, text, disambig)\r\n\r\n\r\nclass Toolbox(QtWidgets.QWidget):\r\n \"\"\"Collapsible dock widget that displays settings and analysis results for the Scope window\r\n \"\"\"\r\n _widget_index = 0 # Keep track of position of the widget added\r\n _sizehint = None\r\n # used for replace formula variables, total allow 52 replacements, from a-zA-Z\r\n _newvarsList = [chr(i) for i in 65+np.arange(26)]+[chr(i) for i in 97+np.arange(26)]\r\n # Annotatable objects\r\n def __init__(self, parent=None, friend=None):\r\n super(Toolbox, self).__init__(parent)\r\n self.parent = parent\r\n self.friend = friend\r\n self.detectedEvents = []\r\n self.eventArtist = [] # list of IDs\r\n self.annotationArtists = [] # list of IDs\r\n self.setupUi()\r\n\r\n def setupUi(self):\r\n self.verticalLayout = self.parent.layout()\r\n # self.setLayout(self.verticalLayout)\r\n self.accWidget = AccordionWidget(self)\r\n\r\n # Add various sub-widgets, which interacts with Scope, a.k.a, friend\r\n self.accWidget.addItem(\"Arithmetic\", self.arithmeticWidget(), collapsed=True)\r\n self.accWidget.addItem(\"Annotation\", self.annotationWidget(), collapsed=True)\r\n self.accWidget.addItem(\"Channels\", self.layoutWidget(), collapsed=True)\r\n self.accWidget.addItem(\"Curve Fit\", self.curvefitWidget(), collapsed=True)\r\n self.accWidget.addItem(\"Event Detection\", self.eventDetectionWidget(), collapsed=True)\r\n self.accWidget.addItem(\"Filter\", self.filterWidget(), collapsed=True)\r\n self.accWidget.addItem(\"Function\", self.functionWidget(), collapsed=False)\r\n\r\n self.accWidget.setRolloutStyle(self.accWidget.Maya)\r\n self.accWidget.setSpacing(0) # More like Maya but I like some padding.\r\n self.verticalLayout.addWidget(self.accWidget)\r\n\r\n # <editor-fold desc=\"Trace arithmetic tools\">\r\n # --------- Trace arithmetic tools ---------------------------------------\r\n def arithmeticWidget(self):\r\n \"\"\"Setting widget for trace manipulation\"\"\"\r\n widgetFrame = QtGui.QFrame(self)\r\n widgetFrame.setLayout(QtGui.QGridLayout())\r\n widgetFrame.setObjectName(_fromUtf8(\"ArithmeticWidgetFrame\"))\r\n # widgetFrame.layout().setSpacing(0)\r\n\r\n calculateButton = QtGui.QPushButton(\"Calculate\")\r\n # Remove baseline from the trace check box\r\n nullCheckBox = QtGui.QCheckBox(\"Null\")\r\n nullCheckBox.setToolTip(\"Remove baseline\")\r\n # null baseline range\r\n rangeTextBox = QtGui.QLineEdit()\r\n rangeTextBox.setToolTip(\"Range of baseline.\\nEnter a single number or a range [min, max] in ms\")\r\n rangeTextBox.setText(\"0\")\r\n # Range unit label\r\n rangeUnitLabel = QtGui.QLabel(\"ms\")\r\n\r\n # Apply filtering before calculation\r\n filtCheckBox = QtGui.QCheckBox(\"Apply filter before calculation\")\r\n filtCheckBox.setToolTip('Apply a filter, defined in the \"Filter\" tool onto each episode, before doing any calculation')\r\n\r\n # Formula\r\n formulaTextBox = QtGui.QLineEdit()\r\n formulaTextBox.setPlaceholderText(\"Formula\")\r\n Tooltips = \"Examples:\\n\"\r\n Tooltips += \"Mean: (S1.E1 + S1.E2 + S1.E3) / 3\\n\"\r\n Tooltips += \"Diff between episodes: S1.E1-S1.E2\\n\"\r\n Tooltips += \"Calculation between regions: S1.E1[500,700] - S1.E2[800,1000]\\n\"\r\n Tooltips += \"Multiple manipulations: S1.E1 - S1.E2; S1.E3 - S1.E4; S1.E5 - S1.E6\\n\"\r\n Tooltips += \"Short hand of S1.E1+S1.E2+S1.E3+S1.E4: S1.E1~4\"\r\n formulaTextBox.setToolTip(Tooltips)\r\n\r\n # Report box\r\n arithReportBox = QtGui.QLabel(\"Arithmetic Results\")\r\n arithReportBox.setStyleSheet(\"background-color: white\")\r\n arithReportBox.setWordWrap(True)\r\n\r\n # Connect all the items to calculationevents\r\n nullCheckBox.stateChanged.connect(lambda checked: self.nullTraces(checked, rangeTextBox))\r\n calculateButton.clicked.connect(lambda: self.calculateTraces(formulaTextBox.text(), nullCheckBox.checkState(), filtCheckBox.checkState(), arithReportBox))\r\n formulaTextBox.returnPressed.connect(lambda: self.calculateTraces(formulaTextBox.text(), nullCheckBox.checkState(), filtCheckBox.checkState(), arithReportBox))\r\n\r\n # Organize all the items in the frame\r\n widgetFrame.layout().addWidget(calculateButton, 0, 0, 1, 3)\r\n widgetFrame.layout().addWidget(nullCheckBox, 1, 0)\r\n widgetFrame.layout().addWidget(rangeTextBox, 1, 1)\r\n widgetFrame.layout().addWidget(rangeUnitLabel, 1, 2)\r\n widgetFrame.layout().addWidget(filtCheckBox, 2, 0, 1, 3)\r\n widgetFrame.layout().addWidget(formulaTextBox, 3, 0, 1, 3)\r\n widgetFrame.layout().addWidget(arithReportBox, 4, 0, 1, 3)\r\n\r\n return widgetFrame\r\n\r\n def nullTraces(self, checked, rangeTextBox):\r\n self.friend.isnull = checked\r\n # parse the range\r\n r = rangeTextBox.text()\r\n if \"[\" not in r: # presumbaly a single number\r\n self.friend.nullRange = float(r)\r\n else: # parse the range\r\n r=r.replace(\"[\",\"\").replace(\"]\",\"\").replace(\",\",\" \")\r\n self.friend.nullRange = [float(k) for k in r.split()]\r\n\r\n # Redraw episodes\r\n index = list(self.friend.index) # keep the current index. Make a copy\r\n episodes = self.friend.episodes # keep the current episode\r\n self.friend.updateEpisodes(episodes=episodes, index=[], updateLayout=False) # clear all the episodes\r\n self.friend.updateEpisodes(episodes=episodes, index=index, updateLayout=False) # redraw all the episodes\r\n\r\n def calculateTraces(self, formula, isNulled, isfilt, arithReportBox):\r\n if not formula or formula==\"Formula\":\r\n return\r\n arithReportBox.setText('') # clear any previous error message first\r\n if isNulled:\r\n r = self.friend.nullRange # should have been already calculated before\r\n else:\r\n r = None\r\n\r\n def parseTilda(f):\r\n \"\"\"Turn \"S1.E2~4\" into\r\n \"(S1.E2+S1.E3+S1.E4)\"\r\n \"\"\"\r\n\r\n if \"~\" not in f:\r\n return f\r\n\r\n # Assuming the S#.E# structure\r\n ep_ranges = re.findall('S(\\d+)\\.E(\\d+)~(\\d+)', f)\r\n for m, ep in enumerate(ep_ranges):\r\n epsl = [\"S{}.E{:d}\".format(ep[0], i) for i in np.arange(int(ep[1]), int(ep[2])+1, 1)]\r\n epsl = \"(\"+\"+\".join(epsl)+\")\"\r\n f = re.sub('S(\\d+)\\.E(\\d+)~(\\d+)', epsl, f, count=1)\r\n\r\n return f\r\n\r\n def parseSimpleFormula(f):\r\n \"\"\"Simple linear basic four operations\r\n e.g. f = \"S1.E1 + S1.E2 - S1.E3 / 2 + S1.E4 * 3 / 8 +5\" -->\r\n D = [S1.E1, S1.E2, S1.E3, S1.E4], K = [1, 1, -0.5, 0.375]\r\n C = 5 (constant term)\r\n If each episode is followed by a range, e.g.\r\n f = \"S1.E1[100,500] + S1.E2[600,1000] - S1.E3[200,600]/ 2 + S1.E4[700,1100] * 3 / 8 +5\",\r\n also return the range R = [[100,500], [600,1000], [200,600], [700,1100]]. Otherwis, R = None\r\n \"\"\"\r\n # separate the formula first\r\n groups = [s.replace(\" \",\"\") for s in filter(None, re.split(r\"(\\+|-)\", f))]\r\n D = [] # data variable\r\n K = [] # scale factors\r\n C = 0 # constant\r\n R = []\r\n\r\n for n, g in enumerate(groups):\r\n # initialize scale factor\r\n if n==0 or groups[n-1] == '+':\r\n k = 1\r\n elif groups[n-1] == '-':\r\n k = -1\r\n\r\n if g == \"-\" or g == \"+\":\r\n continue\r\n elif isstrnum(g): # constants\r\n C += k * str2numeric(g)\r\n elif \"/\" not in g and \"*\" not in g: # single episodes\r\n D.append(g)\r\n K.append(k) # scale factor\r\n elif \"/\" in g or \"*\" in g:\r\n hubs = [s.replace(\" \",\"\") for s in filter(None, re.split(r\"(\\*|/)\", g))]\r\n for m, h in enumerate(hubs):\r\n if h == '*' or h == '/':\r\n continue\r\n elif isstrnum(h):\r\n # examine the operator before\r\n if m == 0 or hubs[m-1] == '*':\r\n k *= str2numeric(h)\r\n elif hubs[m-1] == '/':\r\n k = k/str2numeric(h)\r\n else:\r\n arithReportBox.setText(\"Unrecognized operation \" + hubs[m-1])\r\n return\r\n else: # Data variable\r\n D.append(h)\r\n K.append(k)\r\n else: # fall through for some reason. Need check\r\n arithReportBox.setText(\"Unexpected formula\")\r\n return\r\n\r\n # Further separate D and R\r\n bool_has_range = False\r\n for count_g, g in enumerate(D):\r\n if \"[\" in g:\r\n if not bool_has_range:\r\n bool_has_range = True\r\n g, rng_tmp = re.split(\"\\[\", g)\r\n D[count_g] = g\r\n R.append(str2num(\"+\"+rng_tmp))\r\n\r\n # Double check the length of D and R matches\r\n if bool_has_range and len(D) != len(R):\r\n arithReportBox.setText(\"Specified ranges must follow each episode.\")\r\n return\r\n\r\n return D, K, C, R\r\n\r\n def simpleMath(f, stream, channel, **kwargs):\r\n \"\"\"\" f = \"S1.E1 + S1.E2 - S1.E3 / 2 + S1.E4 * 3 / 8\"\r\n Additional variables can be provided by **kwargs\"\"\"\r\n D, K, Y, R = parseSimpleFormula(f)\r\n if not R:\r\n R = [[]] * len(K)\r\n\r\n for d, k, w in zip(D, K, R):\r\n if d not in kwargs.keys():\r\n # load episodes\r\n try:\r\n yind = self.friend.episodes['Epi'].index(d)\r\n except:\r\n # arithReportBox.setText(d + \" is not a valid episode\")\r\n return\r\n\r\n if not self.friend.episodes['Data'][yind]: # if empty\r\n self.friend.episodes['Data'][yind] = NeuroData(dataFile=self.friend.episodes['Dirs'][yind], old=old, infoOnly=False)\r\n\r\n y = getattr(self.friend.episodes['Data'][yind], stream)[channel] # get the time series\r\n # Window the episode if R is not empty\r\n if w:\r\n y = spk_window(y, self.friend.episodes['Data'][yind].Protocol.msPerPoint, w)\r\n # null the time series\r\n if r is not None:\r\n y = y - self.friend.getNullBaseline(y, self.friend.episodes['Data'][yind].Protocol.msPerPoint, r)\r\n\r\n if isfilt: # apply a filter based on \"Filter\" tool specification\r\n filterType = self.filtertype_comboBox.currentText()\r\n self.getFiltSettingTable(filterType) # update\r\n y = self.inplaceFiltering(True, filterType, yData=y)\r\n if y is None:\r\n print('filtered y became None')\r\n\r\n else:\r\n y = kwargs[d] # assume everything is processed\r\n\r\n # final assembly\r\n # taking care of uneven Y length\r\n try:\r\n if len(Y)==1:\r\n y_len = len(y)\r\n else:\r\n y_len = min([len(Y), len(y)])\r\n Y = Y[0:y_len]\r\n y = y[0:y_len]\r\n except: # object not iterable, like int\r\n pass\r\n\r\n Y += y * k\r\n\r\n return Y\r\n\r\n def callback(match):\r\n return next(callback.v)\r\n\r\n # parse formula\r\n if \";\" in formula: # a list of formulas\r\n # separate each formula\r\n formula = formula.split(\";\")\r\n elif \"\\n\" in formula: # a list of formulas separated by newline character\r\n formula = formula.split(\"\\n\")\r\n else:\r\n formula = [formula]\r\n\r\n # parse each formula\r\n for f0 in formula:\r\n f = parseTilda(f0)\r\n # if has parenthesis\r\n y = dict()\r\n try:\r\n if \"(\" in f:\r\n # to be safe, remove any duplicate parentheses\r\n f = re.sub(\"(\\()+\", \"(\", f)\r\n f = re.sub(\"(\\))+\", \")\", f)\r\n for s, c, _, _ in self.friend.layout:\r\n # separate into a list of simple ones\r\n fSimpleList = re.findall('\\(([^()]*)\\)', f)\r\n # for each simple ones, do calculation\r\n YList = [simpleMath(fSimple, s, c) for fSimple in fSimpleList]\r\n\r\n newvars = self._newvarsList[:len(fSimpleList)] # ['A','B','C',...]\r\n callback.v = iter(newvars)\r\n # new formula: replace all parentheses with a new variable\r\n nf = re.sub(r'\\(([^()]*)\\)', callback, f)\r\n # build a dictionary between the parentheses values and new variables\r\n nfdict = {}\r\n for nn, v in enumerate(newvars):\r\n nfdict[v] = YList[nn]\r\n # use the new variable, together with episode names that was not\r\n # in the parentheses to calculate the final Y\r\n y[(s,c)] = simpleMath(nf, s, c, **nfdict)\r\n else:\r\n for s, c, _, _ in self.friend.layout:\r\n y[(s,c)] = simpleMath(f, s, c)\r\n except Exception as err:\r\n arithReportBox.setText(\"{}\".format(err))\r\n return\r\n\r\n # Subset of the time series if range specified\r\n ts = self.friend.episodes['Sampling Rate'][0]\r\n\r\n y_len = len(y[s,c]) # length of time series\r\n\r\n # Append the data to friend's episodes object\r\n self.friend.episodes['Name'].append(self.friend.episodes['Name'][-1])\r\n self.friend.episodes['Duration'].append(ind2time(y_len-1,ts)[0])\r\n self.friend.episodes['Drug Time'].append('00:00')\r\n self.friend.episodes['Drug Name'].append('')\r\n self.friend.episodes['Drug Level'].append(-1)\r\n self.friend.episodes['Comment'].append('PySynapse Arithmetic Data')\r\n self.friend.episodes['Dirs'].append(f)\r\n self.friend.episodes['Time'].append('00:00')\r\n self.friend.episodes['Epi'].append(f)\r\n self.friend.episodes['Sampling Rate'].append(ts)\r\n # Make up fake data. Be more complete so that it can be exported correctly\r\n zData = NeuroData()\r\n for s, c, _, _ in self.friend.layout:\r\n setattr(zData, s, {c: y[s,c]})\r\n\r\n # fill in missing data\r\n stream_list,_,_,_ = zip(*self.friend.layout)\r\n stream_all = ['Voltage', 'Current', 'Stimulus']\r\n for _, c, _, _ in self.friend.layout:\r\n for s in stream_all:\r\n if s not in stream_list:\r\n setattr(zData, s, {c: np.zeros(y_len)})\r\n\r\n zData.Time = np.arange(y_len) * ts\r\n zData.Protocol.msPerPoint = ts\r\n zData.Protocol.WCtimeStr = \"\"\r\n zData.Protocol.readDataFrom = self.friend.episodes['Name'][0] + \" \" + f0 + \".dat\"\r\n zData.Protocol.numPoints = y_len\r\n zData.Protocol.acquireComment = 'PySynapse Arithmetic Data'\r\n self.friend.episodes['Data'].append(zData)\r\n\r\n # Redraw episodes with new calculations\r\n episodes = self.friend.episodes # keep the current episode\r\n index = list(range(len(episodes['Epi'])-len(formula), len(episodes['Epi']))) # keep the current index. Make a copy\r\n self.friend.updateEpisodes(episodes=episodes, index=[], updateLayout=False) # clear all the episodes\r\n # temporarily disable isnull\r\n self.friend.isnull = False\r\n # Draw the episodes\r\n self.friend.updateEpisodes(episodes=episodes, index=index, updateLayout=False) # redraw all the episodes\r\n # Turn back isnull\r\n self.friend.isnull = isNulled\r\n\r\n # </editor-fold>\r\n\r\n # <editor-fold desc=\"Annotation widget\">\r\n # ----------- Annotation widget ------------------------------------------\r\n # TODO: when add a TTL object in the table, also display the detailed description of the object\r\n def annotationWidget(self):\r\n \"\"\"Adding annotation items on the graph\"\"\"\r\n widgetFrame = QtGui.QFrame(self)\r\n widgetFrame.setLayout(QtGui.QGridLayout())\r\n widgetFrame.setObjectName(_fromUtf8(\"AnnotationWidgetFrame\"))\r\n widgetFrame.layout().setSpacing(0)\r\n self.setAnnotationTable()\r\n addButton = QtGui.QPushButton(\"Add\") # Add an annotation object\r\n addButton.clicked.connect(self.addAnnotationRow)\r\n removeButton = QtGui.QPushButton(\"Remove\") # Remove an annotation object\r\n removeButton.clicked.connect(self.removeAnnotationRow)\r\n editButton = QtGui.QPushButton(\"Edit\") # Edit an annotation object\r\n editButton.clicked.connect(self.editAnnotationArtist)\r\n # Add the buttons\r\n widgetFrame.layout().addWidget(addButton, 1, 0)\r\n widgetFrame.layout().addWidget(removeButton, 1, 1)\r\n widgetFrame.layout().addWidget(editButton, 1, 2)\r\n # Add the exisiting annotations to the table\r\n widgetFrame.layout().addWidget(self.annotation_table, 2, 0, 1, 3)\r\n\r\n return widgetFrame\r\n\r\n def setAnnotationTable(self):\r\n \"\"\"\"(Re)initialize the annotation table\"\"\"\r\n self.annotation_table = QtGui.QTableWidget(0, 2)\r\n self.annotation_table.verticalHeader().setVisible(False)\r\n # self.annotation_table.horizontalHeader().setVisible(False)\r\n self.annotation_table.setHorizontalHeaderLabels(['Artist', 'Notes'])\r\n self.annotation_table.horizontalHeader().setResizeMode(QtGui.QHeaderView.Stretch)\r\n self.annotation_table.horizontalHeader().highlightSections()\r\n self.annotation_table.setSelectionBehavior(QtGui.QAbstractItemView.SelectRows)\r\n # Change style sheet a little for Windows 10\r\n if sys.platform[:3]== 'win' and sys.getwindowsversion().major == 10: # fix the problem that in Windows 10, bottom border of header is not displayed\r\n self.annotation_table.setStyleSheet(\"\"\"\r\n QHeaderView::section{\r\n border-top:0px solid #D8D8D8;\r\n border-left:0px solid #D8D8D8;\r\n border-right:1px solid #D8D8D8;\r\n border-bottom: 1px solid #D8D8D8;\r\n background-color:white;\r\n padding:4px;\r\n font: bold 10px;}\r\n QTableCornerButton::section{\r\n border-top:0px solid #D8D8D8;\r\n border-left:0px solid #D8D8D8;\r\n border-right:1px solid #D8D8D8;\r\n border-bottom: 1px solid #D8D8D8;\r\n background-color:white;}\"\"\")\r\n else:\r\n self.annotation_table.setStyleSheet(\"\"\"QHeaderView::section{font: bold 10px;}\"\"\")\r\n\r\n self.annotation_table.itemChanged.connect(self.onArtistChecked)\r\n\r\n def addAnnotationRow(self):\r\n \"\"\"Add annotation into teh table\"\"\"\r\n # annotationSettings.show()\r\n def append_num_to_repeated_str(l, s, recycle=False):\r\n # rep = sum(1 if s in a else 0 for a in l) # count\r\n rep = [a for a in l if s in a] # gather\r\n nums = [int(r[len(s):]) for r in rep]\r\n\r\n # Extract the numbers appended\r\n if not recycle:\r\n if isinstance(nums, list) and not nums:\r\n nums = [0]\r\n s = s + str(max(nums)+1)\r\n else: # smallest avaiable number starting from 1\r\n if isinstance(nums, list) and not nums:\r\n s = s + '1'\r\n else:\r\n s = s + str(min(list(set(range(1, max(nums)+1)) - set(nums))))\r\n\r\n l = l + [s]\r\n\r\n return l, s\r\n\r\n # Pop up the annotation settings window to get the properties of the annotation settings\r\n annSet = AnnotationSetting()\r\n if annSet.exec_(): # Need to wait annotationSettings has been completed\r\n if annSet.artist.keys(): # if properties are properly specified, draw the artist\r\n # Set Artist table item\r\n self.annotationArtists, artist_name = append_num_to_repeated_str(self.annotationArtists, annSet.type)\r\n AT_item = QtGui.QTableWidgetItem(artist_name)\r\n AT_item.setFlags(QtCore.Qt.ItemIsUserCheckable | QtCore.Qt.ItemIsEnabled | QtCore.Qt.ItemIsSelectable) # can be checked | can be selected\r\n AT_item.setCheckState(QtCore.Qt.Checked) # newly added items are always checked\r\n # Add to the table\r\n row = self.annotation_table.rowCount()\r\n self.annotation_table.blockSignals(True)\r\n self.annotation_table.insertRow(row)\r\n self.annotation_table.setItem(row, 0, AT_item)\r\n self.annotation_table.blockSignals(False)\r\n # Get artist property\r\n artistProperty = annSet.artist\r\n artistProperty['type'] = annSet.type\r\n artistProperty['position'] = row\r\n artistProperty['name'] = artist_name\r\n # Draw the artist\r\n artistProperty = self.drawAnnotationArtist(artist=artistProperty)\r\n AT_item._artistProp = artistProperty\r\n\r\n def removeAnnotationRow(self):\r\n numRows = self.annotation_table.rowCount()\r\n if numRows < 1:\r\n return\r\n row = self.annotation_table.currentRow()\r\n # print(row)\r\n if row is None or row < 0: # if no currently selected row, remove the last row / annotation item\r\n row = numRows - 1\r\n\r\n item = self.annotation_table.item(row, 0)\r\n # print(self.annotationArtists)\r\n # print('removing: %s'%item._artistProp['name'])\r\n self.annotationArtists.remove(item._artistProp['name']) # something would be wrong if there is no artist of this name to remove\r\n # print(self.annotationArtists)\r\n self.eraseAnnotationArtist(artist=item._artistProp)\r\n self.annotation_table.removeRow(row)\r\n\r\n def getArtists(self):\r\n \"\"\"Return a dictionary of artists from annotationTable\"\"\"\r\n artist_dict = OrderedDict()\r\n # Annotation table\r\n for r in range(self.annotation_table.rowCount()):\r\n item = self.annotation_table.item(r, 0)\r\n # Get annotation artist\r\n for k, v in item._artistProp.items():\r\n if isstrnum(v):\r\n item._artistProp[k] = str2numeric(v)\r\n\r\n artist_dict[item._artistProp['name']] = item._artistProp\r\n # fitted curve\r\n if hasattr(self, 'fittedCurve'):\r\n artist_dict['fit'] = self.fittedCurve\r\n # Detected events\r\n if hasattr(self, 'eventArtist'):\r\n for evt in self.eventArtist:\r\n artist_dict[evt['name']] = evt\r\n\r\n return artist_dict\r\n\r\n def onArtistChecked(self, item=None):\r\n \"\"\"Respond if click state was changed for pre-existing artists\"\"\"\r\n if item.column() > 0: # editing comments, ignore\r\n return\r\n\r\n if item.checkState() == 0: # remove the artist if it is present\r\n self.eraseAnnotationArtist(artist=item._artistProp)\r\n else: # assume checkstate > 0, likely 2, redraw the artist\r\n self.drawAnnotationArtist(artist=item._artistProp)\r\n\r\n def drawAnnotationArtist(self, artist=None, which_layout=None):\r\n \"\"\"\r\n :param artist: artist properties\r\n :param which_layout: allows only 1 layout\r\n :return: artist\r\n \"\"\"\r\n if which_layout is None:\r\n which_layout = self.friend.layout[0]\r\n artist['layout'] = which_layout\r\n if artist['type'] in ['box', 'line']:\r\n self.friend.drawROI(artist=artist, which_layout=which_layout)\r\n elif artist['type'] == 'ttl':\r\n # Get additional information about TTL from data: a list of OrderedDict\r\n artist['TTL'] = self.friend.episodes['Data'][self.friend.index[-1]].Protocol.ttlDict\r\n # Get SIU duration\r\n artist['SIU_Duration'] =self.friend.episodes['Data'][self.friend.index[-1]].Protocol.genData[53] # microsec\r\n artist['TTLROIs'] = [[]] * len(artist['TTL']) # used to store properties of TTL annotation shapes\r\n # Looping through each TTL data\r\n for n, TTL in enumerate(artist['TTL']):\r\n if ignoreFirstTTL and n == 0:\r\n continue\r\n\r\n if not TTL['is_on']: # global enable\r\n continue\r\n\r\n TTL_art = []\r\n if TTL['Step_is_on']:\r\n TTL_art.append({'start': np.array([TTL['Step_Latency']]),\r\n 'dur': np.array([TTL['Step_Duration']])})\r\n\r\n if TTL['SIU_Single_Shocks_is_on']:\r\n TTL_art.append({'start': np.array([TTL['SIU_A'], TTL['SIU_B'], TTL['SIU_C'], TTL['SIU_D']]),\r\n 'dur': (artist['SIU_Duration']/1000.0 if not artist['bool_pulse2step'] else 25)})\r\n\r\n if TTL['SIU_Train_is_on']:\r\n if artist['bool_pulse2step']:\r\n TTL_art.append({'start': np.array([TTL['SIU_Train_Start']]),\r\n 'dur': np.array([(TTL['SIU_Train_Number'] -1) * TTL['SIU_Train_Interval'] + artist['SIU_Duration']/1000 +\\\r\n ((TTL['SIU_Train_Burst_Number'] - 1) * TTL['SIU_Train_Burst_Internal'] if TTL['SIU_Train_of_Bursts_is_on'] else 0)])})\r\n else:\r\n start_mat = np.arange(int(TTL['SIU_Train_Number'])) * TTL['SIU_Train_Interval'] + TTL['SIU_Train_Start']\r\n if TTL['SIU_Train_of_Bursts_is_on']:\r\n burst_mat = np.arange(int(TTL['SIU_Train_Burst_Number'])) * TTL['SIU_Train_Burst_Interval']\r\n burst_mat = burst_mat[:, np.newaxis]\r\n start_mat = start_mat + burst_mat # broadcasting\r\n start_mat = start_mat.flatten(order='F')\r\n TTL_art.append({'start': start_mat, 'dur': artist['SIU_Duration']/1000})\r\n\r\n artist['TTLROIs'][n] = TTL_art\r\n\r\n # Draw the ROIs once we know the start and the end\r\n if artist['bool_realpulse']:\r\n p = None\r\n for l in self.friend.layout:\r\n if which_layout[0] in l and which_layout[1] in l:\r\n # get graphics handle\r\n p = self.friend.graphicsView.getItem(row=l[2], col=l[3])\r\n break\r\n if not p:\r\n return\r\n yRange = p.viewRange()[1]\r\n yheight = abs((yRange[1] - yRange[0]) / 20.0)\r\n\r\n iteration = 0\r\n self.TTL_final_artist = [] # self.getArtist will get artist from here\r\n for n, TTL in enumerate(artist['TTLROIs']): # for each TTL channel\r\n if not TTL: # continue if empty\r\n continue\r\n for m, ROIs in enumerate(TTL): # for each ROI in the TTL\r\n if artist['bool_realpulse']: # draw a box\r\n y0 = yRange[0] + iteration * yheight * 1.35\r\n for x0 in ROIs['start']:\r\n evt = {'type': 'box', 'x0': x0, 'y0': y0, 'width': ROIs['dur'], 'height': yheight, 'fill': True,\r\n 'fillcolor': 'k', 'line': False, 'linecolor': 'k', 'linewidth': 0, 'linstyle': '-',\r\n 'name': artist['name']}\r\n self.friend.drawROI(artist=evt, which_layout=which_layout)\r\n self.TTL_final_artist.append(evt)\r\n else: # draw as events\r\n evt = self.friend.drawEvent(ROIs['start'], which_layout=which_layout, info=[artist['name']], color='k', drawat='bottom', iteration=iteration)\r\n self.TTL_final_artist.append(evt)\r\n iteration = iteration + 1 # incerase iteration wen drawing everytime\r\n\r\n else:\r\n raise(NotImplementedError(\"'{}' annotation object has not been implemented yet\".format(artist['type'])))\r\n\r\n return artist\r\n\r\n def eraseAnnotationArtist(self, artist=None, which_layout=None):\r\n if artist['type'] == 'ttl' and not artist['bool_realpulse']:\r\n self.friend.removeEvent(info=[artist['name']], which_layout=which_layout, event_type='event')\r\n else:\r\n self.friend.removeEvent(info=[artist['name']], which_layout=which_layout, event_type='annotation')\r\n\r\n def editAnnotationArtist(self):\r\n \"\"\" Redraw a modified artist\"\"\"\r\n numRows = self.annotation_table.rowCount()\r\n if numRows < 1:\r\n return\r\n row =self.annotation_table.currentRow()\r\n if row is None or row < 0: # if no currently selected row, edit the last row\r\n row = numRows - 1\r\n # Get the item\r\n item = self.annotation_table.item(row, 0)\r\n # check item type\r\n if not hasattr(item, '_artistProp') or item._artistProp['type'] not in AnnotationSetting.ann_obj:\r\n notepad = QtGui.QTableWidgetItem()\r\n notepad.setText(\"This item is not editable\")\r\n self.annotation_table.setItem(row, 1, notepad)\r\n return\r\n\r\n # Prompt for new information\r\n annSet = AnnotationSetting(artist=item._artistProp)\r\n if annSet.exec_(): # Need to wait annotationSettings has been completed\r\n if annSet.artist.keys(): # Draw new artist\r\n # Remove the old artist\r\n self.eraseAnnotationArtist(artist=item._artistProp)\r\n artistProperty = annSet.artist\r\n AT_item = QtGui.QTableWidgetItem(artistProperty['name'])\r\n AT_item.setFlags(QtCore.Qt.ItemIsUserCheckable | QtCore.Qt.ItemIsEnabled | QtCore.Qt.ItemIsSelectable) # can be checked | can be selected\r\n AT_item.setCheckState(QtCore.Qt.Checked) # newly added items are always checked\r\n AT_item._artistProp = artistProperty\r\n self.drawAnnotationArtist(artist=artistProperty)\r\n\r\n\r\n def updateTTL(self):\r\n # Get TTL artist\r\n # print('update TTL')\r\n TTL = None\r\n for r in range(self.annotation_table.rowCount()):\r\n if 'ttl' in self.annotation_table.item(r, 0)._artistProp['name'] and \\\r\n self.annotation_table.item(r, 0).checkState():\r\n TTL = self.annotation_table.item(r, 0)._artistProp\r\n break\r\n if TTL is None:\r\n return\r\n\r\n self.eraseAnnotationArtist(artist=TTL)\r\n self.drawAnnotationArtist(artist=TTL)\r\n\r\n def clearAnnotationArtists(self):\r\n print(\"clear all artist\")\r\n\r\n # </editor-fold>\r\n\r\n # <editor-fold desc=\"Layout control\">\r\n # ------- Layout control -------------------------------------------------\r\n def layoutWidget(self):\r\n \"\"\"Setting layout of the graphicsview of the scope\"\"\"\r\n # Generate a list of available channels and streams\r\n all_layouts = self.friend.getAvailableStreams(warning=False)\r\n if not all_layouts: # if no data loaded\r\n return self.buildTextFrame(text=\"No Data Loaded\")\r\n\r\n # Initialize the layout widget\r\n widgetFrame = QtGui.QFrame(self)\r\n widgetFrame.setLayout(QtGui.QGridLayout())\r\n widgetFrame.setObjectName(_fromUtf8(\"LayoutWidgetFrame\"))\r\n widgetFrame.layout().setSpacing(0)\r\n all_streams = sorted(set([l[0] for l in all_layouts]))\r\n all_streams = [s for s in ['Voltage', 'Current','Stimulus'] if s in all_streams]\r\n all_channels = sorted(set([l[1] for l in all_layouts]))\r\n # Layout setting table\r\n self.setLayoutTable(all_streams, all_channels)\r\n # Buttons for adding and removing channels and streams\r\n addButton = QtGui.QPushButton(\"Add\") # Add a channel\r\n addButton.clicked.connect(lambda: self.addLayoutRow(all_streams=all_streams, all_channels=all_channels))\r\n removeButton = QtGui.QPushButton(\"Remove\") # Remove a channel\r\n removeButton.clicked.connect(self.removeLayoutRow)\r\n # Add the buttons\r\n widgetFrame.layout().addWidget(addButton, 1, 0)\r\n widgetFrame.layout().addWidget(removeButton, 1, 1)\r\n # Add the exisiting channels and streams to the table\r\n widgetFrame.layout().addWidget(self.layout_table, 2, 0, self.layout_table.rowCount(), 2)\r\n return widgetFrame\r\n\r\n def setLayoutTable(self, all_streams, all_channels):\r\n # (Re)initialize the layout table\r\n self.layout_table = QtGui.QTableWidget(0, 2)\r\n self.layout_table.verticalHeader().setVisible(False)\r\n self.layout_table.horizontalHeader().setVisible(False)\r\n self.layout_table.horizontalHeader().setResizeMode(QtGui.QHeaderView.Stretch)\r\n for l in self.friend.layout: # current layout from scope\r\n self.addLayoutRow(all_streams=all_streams, all_channels=all_channels,\\\r\n current_stream=l[0], current_channel=l[1])\r\n\r\n def addLayoutRow(self, all_streams=['Voltage','Current','Stimulus'], \\\r\n all_channels=['A','B','C','D'], \\\r\n current_stream='Voltage', current_channel='A'):\r\n \"\"\"Create a row of 2 combo boxes, one for stream, one for channel\"\"\"\r\n scomb = QtGui.QComboBox()\r\n scomb.addItems(all_streams)\r\n scomb.setCurrentIndex(all_streams.index(current_stream))\r\n ccomb = QtGui.QComboBox()\r\n ccomb.addItems(all_channels)\r\n ccomb.setCurrentIndex(all_channels.index(current_channel))\r\n row = self.layout_table.rowCount()\r\n self.layout_table.insertRow(row)\r\n self.layout_table.setCellWidget(row, 0, scomb) # Stream\r\n self.layout_table.setCellWidget(row, 1, ccomb) # Channel\r\n if row+1 > len(self.friend.layout): # update layout\r\n self.friend.addSubplot(layout=[current_stream, current_channel, row, 0])\r\n scomb.currentIndexChanged.connect(lambda: self.friend.updateStream(old_layout=['stream', 'channel', row, 0], new_layout=[str(scomb.currentText()), str(ccomb.currentText()), row, 0]))\r\n ccomb.currentIndexChanged.connect(lambda: self.friend.updateStream(old_layout=['stream', 'channel', row, 0], new_layout=[str(scomb.currentText()), str(ccomb.currentText()), row, 0]))\r\n self.updateLayoutComboBox()\r\n # self.layout_comboBox = {'stream':scomb, 'channel':ccomb}\r\n\r\n def updateLayoutComboBox(self):\r\n \"\"\"Called when changing a different dataset\"\"\"\r\n all_layouts = self.friend.getAvailableStreams(warning=False)\r\n all_streams = sorted(set([l[0] for l in all_layouts]))\r\n all_streams = [s for s in ['Voltage', 'Current','Stimulus'] if s in all_streams]\r\n all_channels = sorted(set([l[1] for l in all_layouts]))\r\n for r in range(self.layout_table.rowCount()):\r\n current_stream = self.layout_table.cellWidget(r, 0).currentText()\r\n # IMPORTANT: Need to block the signal from this combobox, otherwise, whatever function connected to this\r\n # combobox will be called, which we want to avoid\r\n self.layout_table.cellWidget(r, 0).blockSignals(True)\r\n self.layout_table.cellWidget(r, 0).clear() # clear all streams\r\n self.layout_table.cellWidget(r, 0).addItems(all_streams) # add back all streams\r\n self.layout_table.cellWidget(r, 0).blockSignals(False)\r\n if current_stream in all_streams: # Set original stream back\r\n self.layout_table.cellWidget(r,0).setCurrentIndex(all_streams.index(current_stream))\r\n\r\n current_channel = self.layout_table.cellWidget(r, 1).currentText()\r\n self.layout_table.cellWidget(r, 1).blockSignals(True)\r\n self.layout_table.cellWidget(r, 1).clear() # clear all channels\r\n self.layout_table.cellWidget(r, 1).addItems(all_channels)\r\n self.layout_table.cellWidget(r, 1).blockSignals(False)\r\n\r\n if current_channel in all_channels:\r\n self.layout_table.cellWidget(r, 1).setCurrentIndex(all_channels.index(current_channel))\r\n\r\n def removeLayoutRow(self):\r\n row = self.layout_table.rowCount()-1\r\n if row < 1:\r\n return\r\n self.layout_table.removeRow(row)\r\n self.friend.removeSubplot(layout = self.friend.layout[-1])\r\n\r\n def buildTextFrame(self, text=\"Not Available\"):\r\n \"\"\"Simply displaying some text inside a frame\"\"\"\r\n someFrame = QtGui.QFrame(self)\r\n someFrame.setLayout(QtGui.QVBoxLayout())\r\n someFrame.setObjectName(\"Banner\")\r\n labelarea = QtGui.QLabel(text)\r\n someFrame.layout().addWidget(labelarea)\r\n return someFrame\r\n\r\n # </editor-fold>\r\n\r\n # <editor-fold desc=\"Curve Fitting tools\">\r\n # -------- Curve Fitting tools -------------------------------------------\r\n def curvefitWidget(self):\r\n \"\"\"This returns the initialized curve fitting widget\r\n \"\"\"\r\n # initialize the widget\r\n widgetFrame = QtGui.QFrame(self)\r\n widgetFrame.setLayout(QtGui.QGridLayout())\r\n widgetFrame.setObjectName(_fromUtf8(\"CurveFittingWidgetFrame\"))\r\n widgetFrame.layout().setSpacing(10)\r\n # Curve fitting button\r\n fitButton = QtGui.QPushButton(\"Curve Fit\")\r\n # Window\r\n windowLabel = QtGui.QLabel(\"Window\")\r\n windowLabel.setToolTip(\"Overwrite manual Region Selection\")\r\n windowText = QtGui.QLineEdit()\r\n windowText.setToolTip(\"window in ms [start, end]\")\r\n windowText.setPlaceholderText(\"0\")\r\n # Extrapolate\r\n extrapolLabel = QtGui.QLabel(\"Extrapolate\")\r\n extrapolText = QtGui.QLineEdit()\r\n extrapolText.setToolTip(\"* No extrapolation: leave blank;\\n* Extrapolate backwards 500 ms: -500;\\n* Extrapolate forward 500 ms: 500;\\n* Extrapolate both directions: [-500, 500];\")\r\n extrapolText.setPlaceholderText(\"0\")\r\n # Type of curve to fit dropdown box\r\n curveTypeComboBox = QtGui.QComboBox()\r\n curveTypeComboBox.addItems(['Exponential', 'Polynomial', 'Power'])\r\n # Center and scale\r\n # csCheckBox = QtGui.QCheckBox(\"Center and scale\")\r\n # Report box\r\n cfReportBox = QtGui.QTextEdit(\"Curve Fit Results\")\r\n cfReportBox.setStyleSheet(\"background-color: white\")\r\n\r\n # Arrange the widget\r\n widgetFrame.layout().addWidget(fitButton, 0, 0, 1,3)\r\n widgetFrame.layout().addWidget(curveTypeComboBox, 1, 0, 1, 3)\r\n widgetFrame.layout().addWidget(windowLabel, 2, 0, 1, 1)\r\n widgetFrame.layout().addWidget(windowText, 2, 1, 1, 2)\r\n widgetFrame.layout().addWidget(extrapolLabel, 3, 0, 1, 1)\r\n widgetFrame.layout().addWidget(extrapolText, 3, 1, 1, 1)\r\n\r\n self.cfsr = 4 # curve fitting table start row\r\n\r\n # Settings of curve fitting\r\n self.setCFSettingWidgetFrame(widgetFrame, windowText, extrapolText, cfReportBox, curveTypeComboBox.currentText())\r\n\r\n # Refresh setting section when cf type changed\r\n curveTypeComboBox.currentIndexChanged.connect(lambda: self.setCFSettingWidgetFrame(widgetFrame, windowText, extrapolText, cfReportBox, curveTypeComboBox.currentText()))\r\n\r\n # Summary box behavior\r\n fitButton.clicked.connect(lambda: self.curveFit(curveTypeComboBox.currentText(), cfReportBox))#, csCheckBox.checkState()))\r\n\r\n return widgetFrame\r\n\r\n\r\n def setCFSettingWidgetFrame(self, widgetFrame, windowText, extrapolText, cfReportBox, curve):\r\n # Remove everything at and below the setting rows: rigid setting\r\n widgetFrame = self.removeFromWidget(widgetFrame, cfReportBox, row=4)\r\n # Get the setting table again\r\n self.getCFSettingTable(widgetFrame, windowText, extrapolText, cfReportBox, curve)\r\n for key, val in self.CFsettingTable.items():\r\n if isinstance(key, tuple):\r\n widgetFrame.layout().addWidget(val, key[0], key[1])\r\n # Report box\r\n widgetFrame.layout().addWidget(cfReportBox, widgetFrame.layout().rowCount(), 0, 1, 3)\r\n\r\n def getCFSettingTable(self, widgetFrame, windowText, extrapolText, cfReportBox, curve):\r\n if curve == 'Exponential':\r\n eqLabel = QtGui.QLabel(\"Equation:\")\r\n eqComboBox = QtGui.QComboBox()\r\n eqComboBox.addItems(['a*exp(b*x)+c','a*exp(b*x)', 'a*exp(b*x)+c*exp(d*x)'])\r\n eqComboBox.currentIndexChanged.connect(lambda: self.setExpCFInitializationParams(widgetFrame, cfReportBox, eqComboBox.currentText()))\r\n self.CFsettingTable = {(self.cfsr,0): eqLabel, (self.cfsr,1): eqComboBox}\r\n # Call it once at startup to get initialization parameters\r\n self.setExpCFInitializationParams(widgetFrame, cfReportBox, eqComboBox.currentText())\r\n elif curve == 'Power':\r\n eqLabel = QtGui.QLabel(\"Equation\")\r\n eqComboBox = QtGui.QComboBox()\r\n eqComboBox.addItems(['a*x^b', 'a*x^b+c'])\r\n self.CFsettingTable = {(self.cfsr,0): eqLabel, (self.cfsr,1): eqComboBox}\r\n elif curve == 'Polynomial':\r\n degLabel = QtGui.QLabel(\"Degree:\")\r\n degText = QtGui.QLineEdit(\"1\")\r\n self.CFsettingTable = {(self.cfsr,0):degLabel, (self.cfsr,1): degText}\r\n\r\n self.CFsettingTable.update({'window':windowText, 'extrapolate':extrapolText})\r\n\r\n def setExpCFInitializationParams(self, widgetFrame, cfReportBox, equation='a*exp(b*x)+c'):\r\n # Remove everything at and below the setting rows:\r\n widgetFrame = self.removeFromWidget(widgetFrame, reportBox=cfReportBox, row=self.cfsr+1)\r\n # Get the setting table\r\n self.getExpCFParamTable(equation=equation)\r\n for key, val in self.CFsettingTable.items():\r\n if isinstance(key, tuple):\r\n widgetFrame.layout().addWidget(val, key[0], key[1])\r\n # Report box\r\n widgetFrame.layout().addWidget(cfReportBox, widgetFrame.layout().rowCount(), 0, 1, 3)\r\n\r\n def getExpCFParamTable(self, equation='a*exp(b*x)+c'):\r\n cfsr = self.cfsr\r\n if equation == 'a*exp(b*x)+c':\r\n a0_label = QtGui.QLabel('a0')\r\n a0_text = QtGui.QLineEdit('auto')\r\n b0_label = QtGui.QLabel('b0')\r\n b0_text = QtGui.QLineEdit('auto')\r\n c0_label = QtGui.QLabel('c0')\r\n c0_text = QtGui.QLineEdit('0')\r\n self.CFsettingTable[(cfsr+1, 0)] = a0_label\r\n self.CFsettingTable[(cfsr+1, 1)] = a0_text\r\n self.CFsettingTable[(cfsr+2, 0)] = b0_label\r\n self.CFsettingTable[(cfsr+2, 1)] = b0_text\r\n self.CFsettingTable[(cfsr+3, 0)] = c0_label\r\n self.CFsettingTable[(cfsr+3, 1)] = c0_text\r\n elif equation == 'a*exp(b*x)':\r\n a0_label = QtGui.QLabel('a0')\r\n a0_text = QtGui.QLineEdit('auto')\r\n b0_label = QtGui.QLabel('b0')\r\n b0_text = QtGui.QLineEdit('auto')\r\n self.CFsettingTable[(cfsr+1, 0)] = a0_label\r\n self.CFsettingTable[(cfsr+1, 1)] = a0_text\r\n self.CFsettingTable[(cfsr+2, 0)] = b0_label\r\n self.CFsettingTable[(cfsr+2, 1)] = b0_text\r\n elif equation == 'a*exp(b*x)+c*exp(d*x)':\r\n a0_label = QtGui.QLabel('a0')\r\n a0_text = QtGui.QLineEdit('auto')\r\n b0_label = QtGui.QLabel('b0')\r\n b0_text = QtGui.QLineEdit('auto')\r\n c0_label = QtGui.QLabel('c0')\r\n c0_text = QtGui.QLineEdit('auto')\r\n d0_label = QtGui.QLabel('d0')\r\n d0_text = QtGui.QLineEdit('auto')\r\n self.CFsettingTable[(cfsr+1, 0)] = a0_label\r\n self.CFsettingTable[(cfsr+1, 1)] = a0_text\r\n self.CFsettingTable[(cfsr+2, 0)] = b0_label\r\n self.CFsettingTable[(cfsr+2, 1)] = b0_text\r\n self.CFsettingTable[(cfsr+3, 0)] = c0_label\r\n self.CFsettingTable[(cfsr+3, 1)] = c0_text\r\n self.CFsettingTable[(cfsr+4, 0)] = d0_label\r\n self.CFsettingTable[(cfsr+4, 1)] = d0_text\r\n else:\r\n pass\r\n\r\n def getExpCFDefaultParams(self, xdata, ydata, equation='a*exp(b*x)+c'):\r\n if equation == 'a*exp(b*x)+c':\r\n p0 = list(fit_exp_with_offset(xdata, ydata, sort=False))\r\n elif equation == 'a*exp(b*x)':\r\n p0 = [max(ydata), -0.015 if ydata[-1] < ydata[0] else 0.025]\r\n elif equation == 'a*exp(b*x)+c*exp(d*x)':\r\n p0 = [max(ydata), -0.015 if ydata[-1] < ydata[0] else 0.025, max(ydata),\r\n -0.015 if ydata[-1] < ydata[0] else 0.025]\r\n else:\r\n return\r\n\r\n for m in range(len(p0)): # replacing with user custom values\r\n\r\n if self.CFsettingTable[(self.cfsr+1+m, 1)].text() == 'auto':\r\n pass\r\n elif not isstrnum(self.CFsettingTable[(self.cfsr+1+m, 1)].text()):\r\n pass\r\n else:\r\n p0[m] = str2numeric(self.CFsettingTable[(self.cfsr+1+m, 1)].text())\r\n\r\n #print('Initial fitted parameters')\r\n #print(p0)\r\n return p0\r\n\r\n def curveFit(self, curve, cfReportBox, currentView=(0,0)):#, centerAndScale):\r\n cfsr = self.cfsr\r\n # get view\r\n p = self.friend.graphicsView.getItem(row=currentView[0], col=currentView[1])\r\n # clear previous fit artists\r\n # count_fit = 0\r\n for k, a in enumerate(p.listDataItems()):\r\n if 'fit' in a.name():\r\n # count_fit = count_fit + 1\r\n # Erase the older fits\r\n p.removeItem(a)\r\n\r\n # if len(p.listDataItems())-count_fit > 1:\r\n # cfReportBox.setText(\"Can only fit curve at 1 trace at a time. Please select only 1 trace\")\r\n # return\r\n\r\n # Get only the plotted data of first channel / stream\r\n d = p.listDataItems()[0]\r\n\r\n try:\r\n windowRegion = str2num(self.CFsettingTable['window'].text())\r\n xdata, ydata = spk_window(d.xData, d._ts, windowRegion), spk_window(d.yData, d._ts, windowRegion)\r\n except: # defer to Region Selection\r\n if self.friend.viewRegionOn: # fit between region selection\r\n xdata, ydata = spk_window(d.xData, d._ts, self.friend.selectedRange), spk_window(d.yData, d._ts, self.friend.selectedRange)\r\n\r\n else: # fit within the current view\r\n xdata, ydata = spk_window(d.xData, d._ts, p.viewRange()[0]), spk_window(d.yData, d._ts, p.viewRange()[0])\r\n\r\n # remove baseline: -= and += can be tricky. Use carefully\r\n xoffset = xdata[0]\r\n xdata = xdata - xoffset\r\n yoffset = min(ydata)\r\n ydata = ydata - yoffset\r\n\r\n f0 = None\r\n if curve == 'Exponential':\r\n eqText = self.CFsettingTable[(cfsr,1)].currentText()\r\n p0 = self.getExpCFDefaultParams(xdata, ydata, equation=eqText)\r\n if eqText == 'a*exp(b*x)+c':\r\n f0 = lambda x, a, b, c: a*np.exp(b*x)+c\r\n # bounds = [(-max(abs(ydata))*1.1, -10, -np.inf), (max(abs(ydata))*1.1, 10, np.inf)]\r\n ptext = ['a','b','c']\r\n elif eqText == 'a*exp(b*x)':\r\n f0 = lambda x, a, b: a*np.exp(b*x)\r\n # bounds = [(-max(abs(ydata))*1.1, -10), (max(abs(ydata))*1.1, 10)]\r\n ptext = ['a','b']\r\n elif eqText == 'a*exp(b*x)+c*exp(d*x)':\r\n f0 = lambda x, a, b, c, d: a*np.exp(b*x) + c*np.exp(d*x)\r\n # bounds = [(-max(abs(ydata))*1.1, -10, -max(abs(ydata))*1.1, -10), (max(abs(ydata))*1.1, 10, max(abs(ydata))*1.1, 10)]\r\n ptext = ['a','b','c','d']\r\n elif curve == 'Power':\r\n eqText = self.CFsettingTable[(cfsr,1)].currentText()\r\n if eqText == 'a*x^b':\r\n f0 = lambda x, a, b: a*(x**b)\r\n p0 = np.ones(2,)\r\n # bounds = [(-np.inf, -np.inf), (np.inf, np.inf)]\r\n ptext = ['a','b']\r\n elif eqText == 'a*x^b+c':\r\n f0 = lambda x, a, b, c: a*(x**b)+c\r\n p0 = np.ones(3,)\r\n # bounds = [(-np.inf, -np.inf, -np.inf), (np.inf, np.inf, np.inf)]\r\n ptext = ['a','b','c']\r\n elif curve == 'Polynomial':\r\n eqText = self.CFsettingTable[(cfsr,1)].text()\r\n def f0(x, *p):\r\n poly = 0.\r\n for i, n in enumerate(p):\r\n poly += n * x**i\r\n return poly\r\n deg = int(eqText)\r\n p0 = np.ones(deg+1, )\r\n ptext = ['p'+str(i) for i in range(deg+1)]\r\n # bounds = [tuple([-np.inf]*deg), tuple([np.inf]*deg)]\r\n eqText = []\r\n for m, ppt in enumerate(ptext):\r\n if m == 0:\r\n eqText.append(ptext[0])\r\n elif m==1:\r\n eqText.append(ptext[1] + \"*\" + \"x\")\r\n elif m>=2:\r\n if len(ptext)>3:\r\n eqText.append(\"...\")\r\n eqText.append(ptext[-1] + \"*\" + \"x^{:d}\".format(len(ptext)-1))\r\n break\r\n\r\n eqText = \"+\".join(eqText)\r\n\r\n if f0 is None: # shouldn't go here. For debug only\r\n raise(ValueError('Unrecognized curve equation %s: %s'%(curve, eqText)))\r\n\r\n # Fit the curve\r\n try:\r\n popt, pcov = curve_fit(f0, xdata, ydata, p0=p0, method='trf')\r\n except Exception as err:\r\n cfReportBox.setText(\"{}\".format(err))\r\n return\r\n\r\n # Generate fitted data\r\n yfit = f0(xdata, *popt)\r\n # Do some calculations on the fitting before reporting\r\n SSE = np.sum((yfit - ydata)**2)\r\n RMSE = np.sqrt(SSE/len(yfit))\r\n SS_total = np.poly1d(np.polyfit(xdata, ydata, 1))\r\n SS_total = np.sum((SS_total(xdata) - ydata)**2)\r\n R_sq = 1.0 - SSE / SS_total\r\n R_sq_adj = 1.0 - (SSE/(len(xdata)-len(p0))) / (SS_total/(len(xdata)-1))# Adjusted R_sq\r\n # Draw the fitted data\r\n extrapolWindow = str2num(self.CFsettingTable['extrapolate'].text())\r\n if isnumber(extrapolWindow):\r\n if extrapolWindow > 0:\r\n xplot = np.arange(xdata[0], xdata[-1] + extrapolWindow, xdata[-1] - xdata[-2])\r\n elif extrapolWindow < 0:\r\n xplot = np.arange(extrapolWindow, xdata[-1], xdata[-1] - xdata[-2])\r\n else:\r\n xplot = xdata\r\n elif isinstance(extrapolWindow, (tuple, list, np.ndarray)) and len(extrapolWindow)==2: # assuming a pair of numbers, extrapolating both sides\r\n xplot = np.arange(extrapolWindow[0], xdata[-1] + extrapolWindow[1], xdata[-1] - xdata[-2])\r\n else: # otherwise\r\n xplot = np.arange(xdata[0], xdata[-1], xdata[-1]-xdata[-2])\r\n yplot = f0(xplot, *popt)\r\n for a in p.listDataItems():\r\n if 'fit' in a.name():\r\n #a.setData(xdata+xoffset, yfit+yoffset)\r\n a.setData(xplot+xoffset, yplot+yoffset)\r\n else:\r\n #p.plot(xdata+xoffset, yfit+yoffset, pen='r', name='fit: '+eqText)\r\n p.plot(xplot + xoffset, yplot + yoffset, pen='r', name='fit: ' + eqText)\r\n # Add fitted curve to annotation artist\r\n self.fittedCurve = {'x': xdata+xoffset, 'y': yfit+yoffset, 'linecolor': 'r', 'name': 'fit: '+eqText, \\\r\n 'layout': self.friend.layout[currentView[0]], 'type': 'curve'}\r\n # Report the curve fit\r\n final_text = \"Model: {}\\nEquation:\\n\\t{}\\n\".format(curve, eqText)\r\n final_text += \"Parameters:\\n\"\r\n for ppt, coeff in zip(ptext, popt): # report fitted parameters\r\n final_text += \"\\t\" + ppt + \": \" + \"{:.4g}\".format(coeff) + \"\\n\"\r\n if curve == 'Exponential':\r\n final_text += \"Time Constants:\\n\"\r\n if eqText in ['a*exp(b*x)+c', 'a*exp(b*x)']:\r\n tau = -1.0/popt[1]\r\n final_text += \"\\ttau: \" + \"{:.5f} ms\".format(tau) + \"\\n\"\r\n elif eqText == 'a*exp(b*x)+c*exp(d*x)':\r\n tau1, tau2 = -1.0/popt[1], -1.0/popt[3]\r\n final_text += \"\\ttau1: \" + \"{:.5f} ms\".format(tau1) + \"\\n\"\r\n final_text += \"\\ttau2: \" + \"{:.5f} ms\".format(tau2) + \"\\n\"\r\n\r\n final_text += \"\\nGoodness of fit:\\n\\tSSE: {:.4g}\\n\\tR-squared: {:.4g}\\n\\tAdjusted R-squared: {:.4g}\\n\\tRMSE: {:.4g}\".format(SSE, R_sq, R_sq_adj, RMSE)\r\n cfReportBox.setText(final_text)\r\n\r\n # </editor-fold>\r\n\r\n # <editor-fold desc=\"Event Detection tools\">\r\n # ------- Analysis tools -------------------------------------------------\r\n def eventDetectionWidget(self):\r\n \"\"\"This returns the initialized event detection widget\"\"\"\r\n # Initalize the widget\r\n widgetFrame = QtGui.QFrame(self)\r\n widgetFrame.setLayout(QtGui.QGridLayout())\r\n widgetFrame.setObjectName(_fromUtf8(\"EventDetectionWidgetFrame\"))\r\n widgetFrame.layout().setSpacing(10)\r\n # Detect spikes button\r\n detectButton = QtGui.QPushButton(\"Detect\")\r\n # Type of Event detection to run\r\n # Summary box\r\n detectReportBox = QtGui.QLabel(\"Event Detection Results\")\r\n detectReportBox.setStyleSheet(\"background-color: white\")\r\n detectReportBox.setWordWrap(True)\r\n # Even type selection\r\n eventTypeComboBox = QtGui.QComboBox()\r\n eventTypeComboBox.addItems(['Action Potential', 'Cell Attached Spike', 'EPSP', 'IPSP', 'EPSC','IPSC'])\r\n # Asking to draw on the plot\r\n drawCheckBox = QtGui.QCheckBox(\"Mark Events\")\r\n drawCheckBox.stateChanged.connect(self.clearEvents)\r\n\r\n # Arrange the widget\r\n widgetFrame.layout().addWidget(detectButton, 0, 0, 1, 3)\r\n widgetFrame.layout().addWidget(eventTypeComboBox, 1, 0, 1, 1)\r\n widgetFrame.layout().addWidget(drawCheckBox, 1, 1, 1,1)\r\n\r\n # Settings of event detection\r\n self.setEDSettingWidgetFrame(widgetFrame, detectReportBox, eventTypeComboBox.currentText())\r\n\r\n # Refresh setting section when event type changed\r\n eventTypeComboBox.currentIndexChanged.connect(lambda: self.setEDSettingWidgetFrame(widgetFrame, detectReportBox, eventTypeComboBox.currentText()))\r\n # Summary box behavior\r\n detectButton.clicked.connect(lambda : self.detectEvents(eventTypeComboBox.currentText(), detectReportBox, drawCheckBox.checkState()))\r\n\r\n return widgetFrame\r\n\r\n def setEDSettingWidgetFrame(self, widgetFrame, detectReportBox, event):\r\n # Remove everything at and below the setting rows: rigid setting\r\n widgetFrame = self.removeFromWidget(widgetFrame, reportBox=detectReportBox, row=2)\r\n # Get the setting table again\r\n self.getEDSettingTable(event)\r\n for key, val in self.EDsettingTable.items():\r\n widgetFrame.layout().addWidget(val, key[0], key[1])\r\n # Report box\r\n widgetFrame.layout().addWidget(detectReportBox, widgetFrame.layout().rowCount(), 0, 1, 3)\r\n\r\n def getEDSettingTable(self, event='Action Potential'):\r\n \"\"\"return a table for settings of each even detection\"\"\"\r\n if event == 'Action Potential':\r\n minHeightLabel = QtGui.QLabel(\"Min Height\")\r\n minHeightLabel.setToolTip(\"Minimum amplitude of the AP\")\r\n minHeightTextEdit = QtGui.QLineEdit(\"-10\")\r\n minHeightUnitLabel = QtGui.QLabel(\"mV\")\r\n minDistLabel = QtGui.QLabel(\"Min Dist\")\r\n minDistLabel.setToolTip(\"Minimum distance between detected APs\")\r\n minDistTextEdit = QtGui.QLineEdit(\"1\")\r\n minDistUnitLabel = QtGui.QLabel(\"ms\")\r\n threshLabel = QtGui.QLabel(\"Threshold\")\r\n threshLabel.setToolTip(\"Finds peaks that are at least greater than both adjacent samples by the threshold, TH. TH is a real-valued scalar greater than or equal to zero. The default value of TH is zero.\")\r\n threshTextEdit = QtGui.QLineEdit(\"0\")\r\n threshTextUnitLabel = QtGui.QLabel(\"mV\")\r\n self.EDsettingTable = {(3,0): minHeightLabel, (3,1): minHeightTextEdit,\r\n (3,2): minHeightUnitLabel, (4,0):minDistLabel,\r\n (4,1): minDistTextEdit, (4,2): minDistUnitLabel,\r\n (5,0): threshLabel, (5,1): threshTextEdit, (5,2): threshTextUnitLabel}\r\n elif event in ['EPSP', 'IPSP', 'EPSC','IPSC']:\r\n ampLabel = QtGui.QLabel(\"Amplitude\")\r\n ampLabel.setToolTip(\"Minimum amplitude of the event\")\r\n ampTextEdit = QtGui.QLineEdit(\"0.5\")\r\n ampUnitLabel = QtGui.QLabel(\"mV\")\r\n riseTimeLabel = QtGui.QLabel(\"Rise Time\")\r\n riseTimeLabel.setToolTip(\"Rise time of PSP template\")\r\n riseTimeTextEdit = QtGui.QLineEdit(\"1\")\r\n riseTimeUnitLabel = QtGui.QLabel(\"ms\")\r\n decayTimeLabel = QtGui.QLabel(\"Decay Time\")\r\n decayTimeLabel.setToolTip(\"Decay time of the PSP template\")\r\n decayTimeTextEdit = QtGui.QLineEdit(\"4\")\r\n decayTimeUnitLabel = QtGui.QLabel(\"ms\")\r\n criterionLabel = QtGui.QLabel(\"Criterion\")\r\n criterionLabel.setToolTip(\"Detection statistical criterion: \\n'se': standard error\\n'corr': correlation\")\r\n criterionTextEdit = QtGui.QLineEdit(\"se\")\r\n criterionUnitLabel = QtGui.QLabel(\"\")\r\n threshLabel = QtGui.QLabel(\"Threshold\")\r\n threshLabel.setToolTip(\"Threshold of statistical criterion\")\r\n threshTextEdit = QtGui.QLineEdit(\"3\")\r\n threshUnitLabel = QtGui.QLabel(\"\")\r\n stepLabel = QtGui.QLabel(\"Step\")\r\n stepLabel.setToolTip(\"Step size to convolve the template with the trace\")\r\n stepTextEdit = QtGui.QLineEdit(\"20\")\r\n stepUnitLabel = QtGui.QLabel(\"\")\r\n\r\n self.EDsettingTable = {(3,0):ampLabel, (3,1):ampTextEdit, (3,2):ampUnitLabel,\r\n (4,0):riseTimeLabel, (4,1):riseTimeTextEdit, (4,2):riseTimeUnitLabel,\r\n (5,0):decayTimeLabel, (5,1):decayTimeTextEdit, (5,2):decayTimeUnitLabel,\r\n (6,0):criterionLabel, (6,1):criterionTextEdit, (6,2):criterionUnitLabel,\r\n (7,0):threshLabel, (7,1):threshTextEdit, (7,2):threshUnitLabel,\r\n (8,0):stepLabel, (8,1):stepTextEdit, (8,2):stepUnitLabel\r\n }\r\n\r\n\r\n elif event == 'Cell Attached Spike':\r\n minHeightLabel = QtGui.QLabel(\"Min Height\")\r\n minHeightLabel.setToolTip(\"Minimum amplitude of the spike\")\r\n minHeightTextEdit = QtGui.QLineEdit(\"30\")\r\n minHeightUnitLabel = QtGui.QLabel(\"pA\")\r\n\r\n maxHeightLabel = QtGui.QLabel(\"Min Height\")\r\n maxHeightLabel.setToolTip(\"Minimum amplitude of the spike\")\r\n maxHeightTextEdit = QtGui.QLineEdit(\"300\")\r\n maxHeightUnitLabel = QtGui.QLabel(\"pA\")\r\n\r\n minDistLabel = QtGui.QLabel(\"Min Dist\")\r\n minDistLabel.setToolTip(\"Minimum distance between detected spikes\")\r\n minDistTextEdit = QtGui.QLineEdit(\"10\")\r\n minDistUnitLabel = QtGui.QLabel(\"ms\")\r\n\r\n basefiltLabel = QtGui.QLabel(\"Filter Window\")\r\n basefiltLabel.setToolTip(\"median filter preprocessing window\")\r\n basefiltTextEdit = QtGui.QLineEdit(\"20\")\r\n basefiltUnitLabel = QtGui.QLabel(\"ms\")\r\n\r\n self.EDsettingTable = {(3,0): minHeightLabel, (3,1): minHeightTextEdit, (3,2): minHeightUnitLabel,\r\n (4,0): maxHeightLabel, (4,1): maxHeightTextEdit, (4,2): maxHeightUnitLabel,\r\n (5,0):minDistLabel, (5,1): minDistTextEdit, (5,2): minDistUnitLabel,\r\n (6,0):basefiltLabel, (6,1): basefiltTextEdit, (6,2): basefiltUnitLabel\r\n }\r\n else:\r\n raise(ValueError('Unrecognized event type %s'%(event)))\r\n\r\n def detectEvents(self, event='Action Potential', detectReportBox=None, drawEvents=False, *args, **kwargs):\r\n self.detectedEvents.append(event)\r\n if event == 'Action Potential':\r\n msh = float(self.EDsettingTable[(3,1)].text())\r\n msd = float(self.EDsettingTable[(4,1)].text())\r\n thresh = float(self.EDsettingTable[(5,1)].text())\r\n self.detectAPs(detectReportBox, drawEvents, msh, msd, thresh)\r\n elif event in ['EPSP', 'IPSP', 'EPSC', 'IPSC']:\r\n amp = float(self.EDsettingTable[(3,1)].text())\r\n riseTime = float(self.EDsettingTable[(4,1)].text())\r\n decayTime = float(self.EDsettingTable[(5,1)].text())\r\n criterion = self.EDsettingTable[(6,1)].text()\r\n thresh = float(self.EDsettingTable[7,1].text())\r\n step = float(self.EDsettingTable[(8,1)].text())\r\n self.detectPSPs(detectReportBox, drawEvents, event, riseTime, decayTime, amp, step, criterion, thresh)\r\n elif event == 'Cell Attached Spike':\r\n msh = float(self.EDsettingTable[(3,1)].text())\r\n maxsh = float(self.EDsettingTable[(4,1)].text())\r\n msd = float(self.EDsettingTable[(5,1)].text())\r\n basefilt = float(self.EDsettingTable[(6,1)].text())\r\n self.detectCellAttachedSpikes(detectReportBox, drawEvents, msh, msd, basefilt, maxsh)\r\n\r\n\r\n def clearEvents(self, checked, eventTypes=None, which_layout=None):\r\n \"\"\"Wraps removeEvent. Clear all event types if not specified event\r\n type. Connected to checkbox state\"\"\"\r\n if checked or not self.detectedEvents:\r\n return\r\n\r\n if not eventTypes:\r\n eventTypes = self.detectedEvents\r\n\r\n if isinstance(eventTypes, str):\r\n eventTypes = [eventTypes]\r\n\r\n for evt in eventTypes:\r\n self.friend.removeEvent(info=[evt], which_layout=which_layout)\r\n self.detectedEvents.remove(evt)\r\n\r\n def detectAPs(self, detectReportBox, drawEvent=False, msh=-10, msd=1, thresh=0):\r\n \"\"\"detectAPs(detectReportBox, drawEvent, 'additional settings',...)\"\"\"\r\n if self.friend.viewRegionOn:\r\n selectedWindow = self.friend.selectedRange\r\n else:\r\n selectedWindow = [None, None]\r\n final_label_text = \"\"\r\n iteration = 0\r\n for i in self.friend.index:\r\n zData = self.friend.episodes['Data'][i]\r\n ts = zData.Protocol.msPerPoint\r\n for c, Vs in zData.Voltage.items(): # iterate over channels\r\n Vs = spk_window(Vs, ts, selectedWindow, t0=0)\r\n num_spikes, spike_time, spike_heights = spk_count(Vs, ts, msh=msh, msd=msd, threshold=thresh)\r\n if len(self.friend.index)>0:\r\n final_label_text = final_label_text + os.path.basename(self.friend.episodes['Dirs'][i]) + \"\\n\"\r\n final_label_text = final_label_text + c + \" : \\n\"\r\n final_label_text = final_label_text + \" # spikes: \" + str(num_spikes) + \"\\n\"\r\n final_label_text = final_label_text + \" mean ISI: \"\r\n final_label_text += \"{:0.2f}\".format(np.mean(np.diff(spike_time))) if len(spike_time)>1 else \"NaN\"\r\n final_label_text += \"\\n\"\r\n # Draw event markers\r\n if drawEvent:\r\n if selectedWindow[0] is not None:\r\n spike_time += selectedWindow[0]\r\n # find out the layout\r\n which_layout = self.friend.layout[self.friend.indexStreamChannel('Voltage', c)]\r\n color = self.friend._usedColors[iteration] if self.friend.colorfy else 'r'\r\n eventArtist = self.friend.drawEvent(spike_time, which_layout = which_layout, info=[self.detectedEvents[-1]], color=color, iteration=iteration)\r\n iteration = iteration + 1\r\n self.eventArtist.append(eventArtist)\r\n detectReportBox.setText(final_label_text[:-1])\r\n\r\n def detectPSPs(self, detectReportBox, drawEvent=False, event='EPSP', riseTime=1, decayTime=4, amp=1, step=20, criterion='se', thresh=3.0):\r\n if self.friend.viewRegionOn:\r\n selectedWindow = self.friend.selectedRange\r\n else:\r\n selectedWindow = [None, None]\r\n final_label_text = \"\"\r\n if event in ['EPSP', 'IPSP']:\r\n stream = 'Voltage'\r\n else: # ['EPSC', 'IPSC']\r\n stream = 'Current'\r\n\r\n iteration = 0\r\n for i in self.friend.index:\r\n zData = self.friend.episodes['Data'][i]\r\n ts = zData.Protocol.msPerPoint\r\n # Get events\r\n for c, S in getattr(zData, stream).items():\r\n S = spk_window(S, ts, selectedWindow, t0=0)\r\n event_time, pks, _, _ = detectPSP_template_matching(S, ts, event=event, \\\r\n w=200, tau_RISE=riseTime, tau_DECAY=decayTime, \\\r\n mph=amp, step=step, criterion=criterion, thresh=thresh)\r\n if len(self.friend.index)>0:\r\n final_label_text = final_label_text + os.path.basename(self.friend.episodes['Dirs'][i]) + \"\\n\"\r\n final_label_text = final_label_text + c + \": \\n\"\r\n final_label_text = final_label_text + \" # \" + event + \": \" + str(len(event_time)) + \"\\n\"\r\n final_label_text += \" mean IEI: \"\r\n final_label_text += \"{:0.2f}\".format(np.mean(np.diff(event_time))) if len(event_time)>1 else \"NaN\"\r\n final_label_text += \"\\n\"\r\n # Draw event markers\r\n if drawEvent:\r\n if selectedWindow[0] is not None:\r\n event_time += selectedWindow[0]\r\n\r\n # find out the layout\r\n which_layout = self.friend.layout[self.friend.indexStreamChannel(stream, c)]\r\n color = self.friend._usedColors[iteration] if self.friend.colorfy else 'r'\r\n eventArtist = self.friend.drawEvent(event_time, which_layout = which_layout, info=[self.detectedEvents[-1]], color=color, iteration=iteration)\r\n iteration = iteration + 1\r\n self.eventArtist.append(eventArtist)\r\n detectReportBox.setText(final_label_text[:-1])\r\n\r\n def detectCellAttachedSpikes(self, detectReportBox, drawEvent=False, msh=30, msd=10, basefilt=20, maxsh=300):\r\n if self.friend.viewRegionOn:\r\n selectedWindow = self.friend.selectedRange\r\n else:\r\n selectedWindow = [None, None]\r\n\r\n final_label_text = \"\"\r\n iteration = 0\r\n for i in self.friend.index:\r\n zData = self.friend.episodes['Data'][i]\r\n ts = zData.Protocol.msPerPoint\r\n for c, Is in zData.Current.items():\r\n Is = spk_window(Is, ts, selectedWindow, t0=0)\r\n num_spikes, spike_time, spike_heights = detectSpikes_cell_attached(Is, ts, msh=msh, msd=msd, \\\r\n basefilt=basefilt, maxsh=maxsh, removebase=False)\r\n final_label_text = final_label_text + c + \" : \\n\"\r\n final_label_text = final_label_text + \" # spikes: \" + str(num_spikes) + \"\\n\"\r\n final_label_text = final_label_text + \" mean ISI: \"\r\n final_label_text += \"{:0.2f}\".format(np.mean(np.diff(spike_time))) if len(spike_time)>1 else \"NaN\"\r\n final_label_text += \"\\n\"\r\n # Draw event markers\r\n if drawEvent:\r\n if selectedWindow[0] is not None:\r\n spike_time += selectedWindow[0]\r\n\r\n which_layout = self.friend.layout[self.friend.indexStreamChannel('Current', c)]\r\n color = self.friend._usedColors[iteration] if self.friend.colorfy else 'r'\r\n eventArtist = self.friend.drawEvent(spike_time, which_layout = which_layout, color=color, info=[self.detectedEvents[-1]], iteration=iteration)\r\n iteration = iteration + 1\r\n self.eventArtist.append(eventArtist)\r\n detectReportBox.setText(final_label_text[:-1])\r\n\r\n # </editor-fold>\r\n\r\n #<editor-fold desc=\"Filter widget\">\r\n def filterWidget(self):\r\n \"\"\"Inplace Filter traces\"\"\"\r\n widgetFrame = QtGui.QFrame(self)\r\n widgetFrame.setLayout(QtGui.QGridLayout())\r\n widgetFrame.setObjectName(_fromUtf8(\"Filter\"))\r\n\r\n filter_checkbox = QtGui.QCheckBox('Apply Filter')\r\n filter_checkbox.setToolTip('Apply inplace filtering to current trace')\r\n self.filtertype_comboBox = QtGui.QComboBox()\r\n self.filtertype_comboBox.addItems(['Butter'])\r\n\r\n widgetFrame.layout().addWidget(filter_checkbox, 0, 0, 1, 2)\r\n widgetFrame.layout().addWidget(self.filtertype_comboBox, 1, 0, 1, 2)\r\n\r\n # Settings of filter\r\n self.setFiltSettingWidgetFrame(widgetFrame, self.filtertype_comboBox.currentText())\r\n\r\n # Refresh setting section when filter type changed\r\n self.filtertype_comboBox.currentIndexChanged.connect(lambda: self.setFilterSettingWidgetFrame(widgetFrame, self.filtertype_comboBox.currentText()))\r\n\r\n # When \"Apply Filter\" checkbox is clicked\r\n filter_checkbox.stateChanged.connect(lambda checked: self.inplaceFiltering(checked, self.filtertype_comboBox.currentText()))\r\n\r\n\r\n return widgetFrame\r\n\r\n def setFiltSettingWidgetFrame(self, widgetFrame, filterType):\r\n self.getFiltSettingTable(filterType)\r\n for key, val in self.FiltSettingTable.items():\r\n widgetFrame.layout().addWidget(val, key[0], key[1])\r\n\r\n def getFiltSettingTable(self, filterType):\r\n if filterType.lower() == 'butter':\r\n order_label = QtGui.QLabel(\"Order\")\r\n order_label.setToolTip(\"Filter order\")\r\n order_text = QtGui.QLineEdit(\"3\")\r\n Wn_label = QtGui.QLabel(\"Wn\")\r\n Wn_label.setToolTip(\"Normalized cutoff frequency, between 0 and 1\")\r\n Wn_text = QtGui.QLineEdit(\"0.2\")\r\n Btype_label = QtGui.QLabel(\"Type\")\r\n Btype_combobox = QtGui.QComboBox()\r\n Btype_combobox.addItems([\"low\",\"high\", \"band\"])\r\n self.FiltSettingTable = {(3,0): order_label, (3,1): order_text, (4,0): Wn_label, (4,1): Wn_text,\r\n (5,0): Btype_label, (5,1): Btype_combobox}\r\n else:\r\n pass\r\n\r\n def inplaceFiltering(self, checked, filterType, currentView=(0,0), yData=None):\r\n p = self.friend.graphicsView.getItem(row=currentView[0], col=currentView[1])\r\n # Get only the plotted data of first channel / stream\r\n data = p.listDataItems()\r\n # Flag export\r\n self.friend.exportFiltered = checked\r\n if checked: # assuming changed from unchecked to checked state, apply the filter\r\n if filterType.lower() == 'butter':\r\n Order = str2numeric(self.FiltSettingTable[(3,1)].text())\r\n Wn = str2num(self.FiltSettingTable[(4,1)].text())\r\n Btype = self.FiltSettingTable[(5,1)].currentText()\r\n self.friend.exportFiltered = {'order': Order, 'wn': Wn, 'btype':Btype}\r\n if yData is None: # inplace\r\n for d in data:\r\n y = self.butterFilter(d.yData, Order, Wn, Btype)\r\n d.original_yData = d.yData\r\n d.setData(d.xData, y)\r\n else:\r\n y = self.butterFilter(yData, Order, Wn, Btype)\r\n return y\r\n else: # inplace only: assuming changed from checked to unchecked state, recover original data\r\n for d in data:\r\n if not hasattr(d, 'original_yData'):\r\n print('Data is not currently filtered, cannot recover original data')\r\n return\r\n else:\r\n d.setData(d.xData, d.original_yData)\r\n\r\n def butterFilter(self, y, Order, Wn, Btype=\"low\"):\r\n b, a = butter(Order, Wn, btype=Btype, analog=False, output='ba')\r\n y_filt = filtfilt(b, a, y)\r\n return y_filt\r\n\r\n\r\n # </editor-fold>\r\n\r\n # <editor-fold desc=\"Function widget\">\r\n def functionWidget(self):\r\n \"\"\"Apply a function to selected regions and print out the summary\"\"\"\r\n widgetFrame = QtGui.QFrame(self)\r\n widgetFrame.setLayout(QtGui.QGridLayout())\r\n widgetFrame.setObjectName(_fromUtf8(\"FunctionWidgetFrame\"))\r\n widgetFrame.layout().setSpacing(10)\r\n # Apply button\r\n applyButton = QtGui.QPushButton(\"Apply\")\r\n # Select from a list of pre-existing tools, or enter a custom function\r\n functionComboBox = QtGui.QComboBox()\r\n functionComboBox.addItems(['mean', 'std', 'diff', 'rms', 'series resistance', 'Rin', 'Rin2', 'mean time']) # 'custom'\r\n # Summary box\r\n functionReportBox = QtGui.QLabel(\"Apply a function\")\r\n functionReportBox.setStyleSheet(\"background-color: white\")\r\n functionReportBox.setWordWrap(True)\r\n # Arrange the widget\r\n widgetFrame.layout().addWidget(applyButton, 0, 0, 1, 3)\r\n widgetFrame.layout().addWidget(functionComboBox, 1, 0, 1, 3)\r\n\r\n # Get setting for each function\r\n self.setAFSettingWidgetFrame(widgetFrame, functionReportBox, functionComboBox.currentText())\r\n\r\n # Refresh setting section when function changed\r\n functionComboBox.currentIndexChanged.connect(lambda: self.setAFSettingWidgetFrame(widgetFrame, functionReportBox, functionComboBox.currentText()))\r\n # Summary box behavior\r\n applyButton.clicked.connect(lambda: self.applyFunction(functionComboBox.currentText(), functionReportBox))\r\n return widgetFrame\r\n\r\n def setAFSettingWidgetFrame(self, widgetFrame, functionReportBox, func):\r\n # Remove everything at and below the setting rows: rigid setting\r\n widgetFrame = self.removeFromWidget(widgetFrame, reportBox=functionReportBox, row=2)\r\n self.getFASettingTable(func, functionReportBox)\r\n for key, val in self.FASettingTable.items():\r\n widgetFrame.layout().addWidget(val, key[0], key[1], 1, 3)\r\n # Report box\r\n widgetFrame.layout().addWidget(functionReportBox, widgetFrame.layout().rowCount(), 0, 1, 3)\r\n if func == 'Rin':\r\n functionReportBox.setText(\"Calculate Rin from negative pulse in the trace\")\r\n elif func == 'Rin2':\r\n functionReportBox.setText(\"Calculate Rin from two episodes\")\r\n elif func == 'series resistance':\r\n functionReportBox.setText(\"Calculate series resistance from current trace in voltage clamp\")\r\n elif func ==' diff':\r\n functionReportBox.setText(\"Calculate the average difference between two trace\")\r\n elif func == 'mean time':\r\n functionReportBox.setText(\"Calculate the average time of a window\")\r\n else:\r\n functionReportBox.setText(\"Apply a function\")\r\n\r\n def getFASettingTable(self, func='mean', functionReportBox=None):\r\n \"\"\"Return a table for settings of each function to be applied\"\"\"\r\n if func == \"Rin\":\r\n useCurrent_checkBox = QtGui.QCheckBox(\"Use current instead\")\r\n useCurrent_checkBox.setToolTip(\"Check to use current to calculate input resistance instead\")\r\n windowSize_label = QtGui.QLabel(\"Window (ms)\")\r\n windowSize_textbox = QtGui.QLineEdit(\"25\")\r\n self.FASettingTable = {(2,0):useCurrent_checkBox, (3,0):windowSize_label, (4,0):windowSize_textbox}\r\n\r\n elif func =='custom':\r\n customFuncTextEdit = QtGui.QLineEdit()\r\n customFuncTextEdit.setPlaceholderText(\"Custom Function\")\r\n customFuncTextEdit.setToolTip(\"Enter a custom function to be applied\")\r\n self.FASettingTable = {(2,0):customFuncTextEdit}\r\n\r\n else:\r\n self.FASettingTable = {}\r\n\r\n\r\n def applyFunction(self, func='mean', functionReportBox=None, *args, **kwargs):\r\n if not self.friend.index:\r\n functionReportBox.setText(\"Select episode to apply function to\")\r\n return\r\n layout = self.friend.layout[0] # Apply only onto the trace in the first / top layout\r\n zData = self.friend.episodes['Data'][self.friend.index[-1]]\r\n ts = zData.Protocol.msPerPoint\r\n final_label_text = \"\"\r\n\r\n if func in ['mean', 'std', 'rms']:\r\n Y = getattr(zData, layout[0])[layout[1]]\r\n if self.friend.viewRegionOn:\r\n Y = spk_window(Y, ts, self.friend.selectedRange)\r\n\r\n if func == 'mean':\r\n if layout[0][0].lower() == 'v':\r\n unit_suffix = 'mV'\r\n elif layout[0][0].lower() == 'C':\r\n unit_suffix = 'pA'\r\n else: # Stimulus\r\n unit_suffix = ''\r\n final_label_text = \"mean: {:.9f} {}\".format(np.mean(Y), unit_suffix)\r\n elif func == 'std':\r\n final_label_text = \"std: {:.9f}\".format(np.std(Y))\r\n elif func == 'rms':\r\n final_label_text = \"rms: {:.9f}\".format(rms(Y))\r\n elif func == 'series resistance':\r\n Vs = zData.Stimulus[layout[1]]\r\n Is = zData.Current[layout[1]]\r\n series_resistance, tau, adjrsquare = spk_vclamp_series_resistance(Is, Vs, ts)\r\n final_label_text = \"R series: {:.3f} MOhm\\nadjrsquare: {:.9f}\".format(series_resistance, adjrsquare)\r\n elif func == 'Rin': # Calculate Rin with negative step\r\n if not self.friend.viewRegionOn:\r\n final_label_text = \"Select a region to calculate Rin\"\r\n else:\r\n useCurrent = self.FASettingTable[(2,0)].isChecked()\r\n window_size = str2numeric(self.FASettingTable[(4,0)].text())\r\n\r\n V1 = np.mean(spk_window(zData.Voltage[layout[1]], ts, self.friend.selectedRange[0] + window_size * np.asarray([-1,1])))\r\n V2 = np.mean(spk_window(zData.Voltage[layout[1]], ts, self.friend.selectedRange[1] + window_size * np.asarray([-1,1])))\r\n\r\n if useCurrent:\r\n S1 = np.mean(spk_window(zData.Current[layout[1]], ts, self.friend.selectedRange[0] + window_size * np.asarray([-1,1])))\r\n S2 = np.mean(spk_window(zData.Current[layout[1]], ts, self.friend.selectedRange[1] + window_size * np.asarray([-1,1])))\r\n else:\r\n S1 = np.mean(spk_window(zData.Stimulus[layout[1]], ts, self.friend.selectedRange[0] + window_size * np.asarray([-1,1])))\r\n S2 = np.mean(spk_window(zData.Stimulus[layout[1]], ts, self.friend.selectedRange[1] + window_size * np.asarray([-1,1])))\r\n\r\n Rin = (V2-V1)/(S2-S1)*1000\r\n final_label_text = \"Rin = {:.9f} MOhm;\".format(Rin)\r\n elif func == 'Rin2': # Calculating for 2 episodes with holding current change\r\n if not self.friend.viewRegionOn:\r\n final_label_text = \"Select a region to calculate Rin\"\r\n elif len(self.friend.index)<2:\r\n final_label_text = \"Select two episodes to calculate Rin\"\r\n else:\r\n V1 = np.mean(spk_window(zData.Voltage[layout[1]], ts, self.friend.selectedRange))\r\n S1 = np.mean(spk_window(zData.Current[layout[1]], ts, self.friend.selectedRange))\r\n\r\n zData2 = self.friend.episodes['Data'][self.friend.index[-2]]\r\n V2 = np.mean(spk_window(zData2.Voltage[layout[1]], ts, self.friend.selectedRange))\r\n S2 = np.mean(spk_window(zData2.Current[layout[1]], ts, self.friend.selectedRange))\r\n\r\n Rin = (V2 - V1) / (S2 - S1) * 1000\r\n final_label_text = \"Rin = {:.5f} MOhm;\".format(Rin)\r\n\r\n elif func == 'diff': # Calculating average difference between two episodes\r\n if len(self.friend.index)<2:\r\n final_label_text = \"Select two episodes to calculate diff\"\r\n else:\r\n zData1 = self.friend.episodes['Data'][self.friend.index[-1]]\r\n zData2 = self.friend.episodes['Data'][self.friend.index[-2]]\r\n\r\n Y1 = getattr(zData1, layout[0])[layout[1]]\r\n Y2 = getattr(zData2, layout[0])[layout[1]]\r\n if self.friend.viewRegionOn:\r\n Y1 = spk_window(Y1, ts, self.friend.selectedRange)\r\n Y2 = spk_window(Y2, ts, self.friend.selectedRange)\r\n\r\n final_label_text = 'Diff = {:.9f}'.format(np.mean(Y1) - np.mean(Y2))\r\n\r\n elif func == 'mean time': # given a window, display the start, end, and mean time\r\n start_time, end_time = self.friend.selectedRange\r\n mean_time = (start_time + end_time) / 2.0\r\n final_label_text = 'Start: {:.9f}\\nEnd: {:.9f}\\nMean: {:.9f}'.format(start_time, end_time, mean_time)\r\n\r\n else: # custom function\r\n pass\r\n functionReportBox.setText(final_label_text.strip())\r\n\r\n # </editor-fold>\r\n\r\n # <editor-fold desc=\"Other utilities 2\">\r\n # ------- Other utilities ------------------------------------------------\r\n def replaceWidget(self, widget=None, index=0):\r\n old_widget = self.accWidget.takeAt(index)\r\n self.accWidget.addItem(title=old_widget.title(), widget=widget, collapsed=old_widget._collapsed,\r\n index=index)\r\n return\r\n\r\n def removeFromWidget(self, widgetFrame, reportBox, row=0):\r\n \"\"\"Remove widgets from a widgetFrame below row, excluding a reportBox\"\"\"\r\n nrows = widgetFrame.layout().rowCount()\r\n if nrows>row:\r\n for r in range(row, nrows):\r\n for col in range(widgetFrame.layout().columnCount()):\r\n currentItem = widgetFrame.layout().itemAtPosition(r, col)\r\n if currentItem is not None:\r\n if currentItem.widget() is not reportBox:\r\n currentItem.widget().deleteLater()\r\n else:\r\n widgetFrame.layout().removeItem(currentItem)\r\n\r\n return widgetFrame\r\n\r\n def sizeHint(self):\r\n \"\"\"Helps with initial dock window size\"\"\"\r\n return QtCore.QSize(self.friend.frameGeometry().width() / 4.95, 20)\r\n\r\n # </editor-fold>\r\n\r\n"
},
{
"alpha_fraction": 0.5469832420349121,
"alphanum_fraction": 0.5873743295669556,
"avg_line_length": 42.16790008544922,
"blob_id": "4fdb68e6b680c171b5f40a472447da522d8ac969",
"content_id": "c26e0a6a115f00cbade6352bb3d512e3e8756ea2",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 17900,
"license_type": "no_license",
"max_line_length": 126,
"num_lines": 405,
"path": "/image_processing/ai2svg.py",
"repo_name": "sapphire008/Python",
"src_encoding": "UTF-8",
"text": "#!/usr/bin/python\r\n# -*- coding: latin-1 -*-\r\n#- ai2svg converter 060901c (for Inkscape .inx) - Paulo Silva (GPL licence)\r\n#- file version reading: 1,2,3,4,5,6,7 - saved from Adobe Illustrator 7\r\n#- file version reading: 1,2,3,5,7 - exported from Macromedia Freehand 8\r\n#---------------------------------------\r\n#- missing: |\r\n#- - grouped paths (????_ |\r\n#- - placed and embedded pics (????_ |\r\n#- - gradients (????) |\r\n#- - patterns (????) |\r\n#- - cmyk to rgb accuracy (????) |\r\n#---------------------------------------\r\n#- layers, joined paths, dashes: ok\r\n#- textboxes (with some bugs: anchors, characters, etc...)\r\n\r\nimport os,sys\r\nfinp_st=sys.argv[1];fout_st=finp_st+\".svg\"\r\nfstcmd=0;xo=0;yo=0;xn=0;yn=0\r\n#--------functions-------------------------------------------------\r\ndef reverse(a1_st): #- gets a reversed string\r\n a1_st=a1_st.replace(\"\\n\",\"\");a1leng=len(a1_st);tmpr_st=\"\"\r\n for lp1 in range(0,a1leng,1):\r\n tmpr_st+=a1_st[a1leng-1-lp1]\r\n return tmpr_st\r\ndef locstrinfo(a_st,b_st,c_st): #- seeks a substring between defined chars\r\n adra=0;adrb=0;d_st=\"\"\r\n while adrb<=len(a_st) and adrb<len(b_st):\r\n if a_st[adra]==b_st[adrb]:adrb+=1\r\n adra+=1\r\n while a_st[adra]!=c_st[0]:\r\n d_st+=a_st[adra];adra+=1\r\n return d_st\r\ndef hexvl(a_st): #- does the reverse way from 'hex()'\r\n a_st=a_st.lower();tmpr=0;hx_st=\"0123456789abcdef\";hx_st=hx_st.lower()\r\n for i in range(0,len(a_st),1):tmpr=(tmpr*16)+hx_st.find(a_st[i])\r\n return tmpr\r\ndef fixhxc_st(va): #- fixes the lenght of a hex string to 6 characters\r\n tm_st=\"000000\"+hex(va).lstrip(\"0x\");tm_st=\"#\"+tm_st[len(tm_st)-6:]\r\n return tm_st.upper()\r\ndef fixhex(va,sz): #- fixes the lenght of a hex string\r\n tml_st=\"\"\r\n for i in range (0,sz,1):tml_st+=\"0\"\r\n tml_st+=hex(va).lstrip(\"0x\");tml_st=tml_st[len(tml_st)-sz:]\r\n return tml_st.upper()\r\ndef hexfromrgb(r1,g1,b1): #- gets hexcolour string from rgb\r\n if r1<0:r1=0\r\n if g1<0:g1=0\r\n if b1<0:b1=0\r\n if r1>255:r1=255\r\n if g1>255:g1=255\r\n if b1>255:b1=255\r\n rgbv=(65536*r1)+(256*g1)+(b1);tm_st=\"000000\"+hex(rgbv).lstrip(\"0x\")\r\n tm_st=tm_st[len(tm_st)-6:]\r\n return tm_st.upper()\r\ndef hexcmykrgbinv(c1,m1,y1,k1): #- gets fake rgb from inverting cmyk\r\n r1=int((1-c1-k1)*255)\r\n if r1<0: r1=0\r\n g1=int((1-m1-k1)*255)\r\n if g1<0: g1=0\r\n b1=int((1-y1-k1)*255)\r\n if b1<0: b1=0\r\n rgbv=(65536*r1)+(256*g1)+(b1);hxrgbv_st=hex(rgbv)\r\n #print rgbv\r\n tm_st=\"000000\"+hxrgbv_st.lstrip(\"0x\");tm_st=tm_st[len(tm_st)-6:]\r\n return tm_st.upper()\r\ndef ystupdn(yc1_st,ye1_st): #- flips y position (string to string)\r\n return str(float(ye1_st)-float(yc1_st))\r\ndef aisvgtxrepl(txtm_st): #- brand new function i don't know if works (Tx)(latin-1)\r\n #- some cleanup\r\n txtm_st=txtm_st.replace(\"<\",\"<\")\r\n txtm_st=txtm_st.replace(\">\",\">\")\r\n txtm_st=txtm_st.replace(\"&\",\"&\")\r\n txtm_st=txtm_st.replace(\"\\\"\",\""\")\r\n txtm_st=txtm_st.replace(\"\\'\",\"'\")\r\n txtm_st=txtm_st.replace(\"\\\\)\",\")\") #- not working?\r\n txtm_st=txtm_st.replace(\"\\\\(\",\"(\")\r\n txtm_st=txtm_st.replace(\"\\\\015\",\"\") #- .ai linebreak\r\n txtm_st=txtm_st.replace(\"\\\\136\",\"?\")\r\n txtm_st=txtm_st.replace(\"\\\\177\",\"?\")\r\n #- from 128 to 159 (unicode stuff...)\r\n txtm_st=txtm_st.replace(\"\\\\200\",\"€\");txtm_st=txtm_st.replace(\"\\\\201\",\" \")\r\n txtm_st=txtm_st.replace(\"\\\\202\",\"‚\");txtm_st=txtm_st.replace(\"\\\\203\",\"ƒ\")\r\n txtm_st=txtm_st.replace(\"\\\\204\",\"„\");txtm_st=txtm_st.replace(\"\\\\205\",\"…\")\r\n txtm_st=txtm_st.replace(\"\\\\206\",\"†\");txtm_st=txtm_st.replace(\"\\\\207\",\"‡\")\r\n txtm_st=txtm_st.replace(\"\\\\210\",\"ˆ\"); txtm_st=txtm_st.replace(\"\\\\211\",\"‰\")\r\n txtm_st=txtm_st.replace(\"\\\\212\",\"Š\"); txtm_st=txtm_st.replace(\"\\\\213\",\"‹\")\r\n txtm_st=txtm_st.replace(\"\\\\214\",\"Œ\"); txtm_st=txtm_st.replace(\"\\\\215\",\" \")\r\n txtm_st=txtm_st.replace(\"\\\\216\",\"Ž\"); txtm_st=txtm_st.replace(\"\\\\217\",\" \")\r\n txtm_st=txtm_st.replace(\"\\\\220\",\" \"); txtm_st=txtm_st.replace(\"\\\\221\",\"‘\")\r\n txtm_st=txtm_st.replace(\"\\\\222\",\"’\");txtm_st=txtm_st.replace(\"\\\\223\",\"“\")\r\n txtm_st=txtm_st.replace(\"\\\\224\",\"”\");txtm_st=txtm_st.replace(\"\\\\225\",\"•\")\r\n txtm_st=txtm_st.replace(\"\\\\226\",\"–\");txtm_st=txtm_st.replace(\"\\\\227\",\"—\")\r\n txtm_st=txtm_st.replace(\"\\\\230\",\"˜\"); txtm_st=txtm_st.replace(\"\\\\231\",\"™\")\r\n txtm_st=txtm_st.replace(\"\\\\232\",\"š\"); txtm_st=txtm_st.replace(\"\\\\233\",\"›\")\r\n txtm_st=txtm_st.replace(\"\\\\234\",\"œ\"); txtm_st=txtm_st.replace(\"\\\\235\",\" \")\r\n txtm_st=txtm_st.replace(\"\\\\236\",\"ž\"); txtm_st=txtm_st.replace(\"\\\\237\",\"Ÿ\")\r\n #- from 160 to 255\r\n for i in range(160,256,1):\r\n j=((i&7)+(((i&56)/8)*10)+(((i&192)/64)*100))\r\n txtm2_st=\"&#\"+str(i)+\";\";txtm1_st=\"\\\\\"+str(j)\r\n txtm_st=txtm_st.replace(txtm1_st,txtm2_st)\r\n txtm_st=txtm_st.replace(\"\\\\\",\"\\")\r\n return txtm_st\r\n\r\ndef clzempty(f_hxstroke_st,f_strokewidth_st,f_miterlimit_st,f_dasharray_st,f_dashoffset_st):\r\n print\"z\\\" style=\\\"fill-rule:evenodd;fill:none;fill-opacity:1;\"\r\n print\"stroke:#\"+f_hxstroke_st+\";stroke-opacity:1;stroke-width:\"+f_strokewidth_st+\";\"\r\n print\"stroke-linecap:butt;stroke-linejoin:miter;\"\r\n print\"stroke-miterlimit:\"+f_miterlimit_st+\";stroke-dasharray:\"+f_dasharray_st+\";stroke-dashoffset:\"+f_dashoffset_st+\";\"\r\n print\"visibility:visible;display:inline;overflow:visible\\\"/>\\n\"\r\n\r\ndef clzpolyline(f_hxstroke_st,f_strokewidth_st,f_miterlimit_st,f_dasharray_st,f_dashoffset_st):\r\n print\"\\\" style=\\\"fill-rule:evenodd;fill:none;fill-opacity:1;\"\r\n print\"stroke:#\"+f_hxstroke_st+\";stroke-opacity:1;stroke-width:\"+f_strokewidth_st+\";\"\r\n print\"stroke-linecap:butt;stroke-linejoin:miter;\"\r\n print\"stroke-miterlimit:\"+f_miterlimit_st+\";stroke-dasharray:\"+f_dasharray_st+\";stroke-dashoffset:\"+f_dashoffset_st+\"\\\"/>\\n\"\r\n\r\ndef clzfilled(f_hxfill_st,f_hxstroke_st,f_strokewidth_st,f_miterlimit_st,f_dasharray_st,f_dashoffset_st):\r\n print\"z\\\" style=\\\"fill-rule:evenodd;fill:#\"+f_hxfill_st+\";fill-opacity:1;\"\r\n print\"stroke:#\"+f_hxstroke_st+\";stroke-opacity:1;stroke-width:\"+f_strokewidth_st+\";\"\r\n print\"stroke-linecap:butt;stroke-linejoin:miter;\"\r\n print\"stroke-miterlimit:\"+f_miterlimit_st+\";stroke-dasharray:\"+f_dasharray_st+\";stroke-dashoffset:\"+f_dashoffset_st+\";\"\r\n print\"visibility:visible;display:inline;overflow:visible\\\"/>\\n\"\r\n\r\ndef clzsolid(f_hxfill_st,f_strokewidth_st,f_miterlimit_st,f_dashoffset_st):\r\n print\"z\\\" style=\\\"fill-rule:evenodd;fill:#\"+f_hxfill_st+\";fill-opacity:1;\"\r\n print\"stroke:none;stroke-opacity:1;stroke-width:\"+f_strokewidth_st+\";\"\r\n print\"stroke-linecap:butt;stroke-linejoin:miter;\"\r\n print\"stroke-miterlimit:\"+f_miterlimit_st+\";stroke-dashoffset:\"+f_dashoffset_st+\";\"\r\n print\"visibility:visible;display:inline;overflow:visible\\\"/>\\n\"\r\n\r\n#------------- code -----------------------------------------------\r\n#if finp_st.lower()==\"--help\".lower():\r\n# print\"ai2svg.py - Paulo Silva (GPL licence)\"\r\n# print\"usage: python ai2svg.py yourdrawing.ai\"\r\n# print\"the result will appear as neighbour named yourdrawing.ai.svg\"\r\n# print\"(please keep in mind all results may need some repairs)\"\r\n#else:\r\n #------------- starting --------------------------------------------\r\nfinp_fl=open(finp_st,\"r\")\r\n#fout_fl=open(fout_st,\"w\")\r\nrdflg=0;shapeflg=0;fntszflg=0\r\ntxboxflg=0;pathjoin=0;lastshape_st=\"f\"\r\nstrokewidth=1;miterlimit=4;dasharray_st=\"\";dashoffset=0;txboxline_st=\"\"\r\nhxfill_st=\"000000\";hxstroke_st=\"000000\"\r\nxtxboxpos=0;ytxboxpos=0;ycurlead=0;ysteplead=10\r\nidpath=0;idtxbox=0;idtxspan=0;lastjpath=0\r\nid=0 #- try to make this variable obsolete, replacing with 'idpath'\r\n#- svg header\r\nprint\"<?xml version=\\\"1.0\\\" encoding=\\\"UTF-8\\\" standalone=\\\"no\\\"?>\"\r\n#------------- converter looping-------------------------------------\r\n\r\nrdflg=1\r\n\r\nwhile True:\r\n #- track state of being in a path\r\n is_open = False\r\n\r\n #- reading next .ai line\r\n wltext_st=finp_fl.readline()\r\n if len(wltext_st)==0:break\r\n wltext_st=wltext_st+\"\\n\"\r\n wltext_st=wltext_st.replace(\"\\r\",\"\\n\")\r\n #- substituir por um loop do tamanho da string\r\n for tm1 in range(0,len(wltext_st)+2,1):\r\n wltext_st=wltext_st.replace(\"\\n\\n\",\"\\n\")\r\n wltext_st=wltext_st.replace(\"%%TemplateBox:\",\"%%TemplateBox: \")\r\n wltext_st=wltext_st.replace(\"%%TemplateBox: \",\"%%TemplateBox: \")\r\n while len(wltext_st)>0:\r\n bkln=wltext_st.find(\"\\n\")\r\n text_st=wltext_st[:bkln+1]\r\n wltext_st=wltext_st[bkln+1:]\r\n\r\n #- cleaning breaklines and tabs from string\r\n text_st=text_st.replace(\"\\n\",\"\");text_st=text_st.replace(\"\\t\",\" \")\r\n text_st=text_st.strip();\r\n textrev_st=reverse(text_st);textrev_st=\">\"+textrev_st+\" 0 0 0 0 0 0 0 0\"\r\n text_st=\">\"+text_st+\" 0 0 0 0 0 0 0 0\"\r\n #- getting substrings\r\n v0_st=locstrinfo(text_st,\">\",\" \")\r\n v1_st=locstrinfo(text_st,\"> \",\" \")\r\n v2_st=locstrinfo(text_st,\"> \",\" \")\r\n v3_st=locstrinfo(text_st,\"> \",\" \")\r\n v4_st=locstrinfo(text_st,\"> \",\" \")\r\n v5_st=locstrinfo(text_st,\"> \",\" \")\r\n v6_st=locstrinfo(text_st,\"> \",\" \")\r\n v7_st=locstrinfo(text_st,\"> \",\" \")\r\n v0rev_st=reverse(locstrinfo(textrev_st,\">\",\" \"))\r\n v1rev_st=reverse(locstrinfo(textrev_st,\"> \",\" \"))\r\n #- gets paper size (ai1)\r\n if v0_st==\"%%TemplateBox:\":\r\n hv1v=float(v1_st)+float(v3_st);vv1v=float(v2_st)+float(v4_st)\r\n print\"<svg width=\\\"\"+str(hv1v)+\"pt\\\" height=\\\"\"+str(vv1v)+\"pt\\\">\\n\\n\"\r\n yed_st=str(vv1v)\r\n #- gets paper size (ai3)\r\n if v0_st==\"%AI3_TemplateBox:\":\r\n hv1v=float(v1_st)+float(v3_st);vv1v=float(v2_st)+float(v4_st)\r\n print\"<svg width=\\\"\"+str(hv1v)+\"pt\\\" height=\\\"\"+str(vv1v)+\"pt\\\">\\n\\n\"\r\n yed_st=str(vv1v)\r\n #- w - stroke width\r\n if v1_st==\"w\":strokewidth=float(v0_st)\r\n\r\n #- k - fill colour\r\n if v4_st==\"k\":hxfill_st=hexcmykrgbinv(float(v0_st),float(v1_st),float(v2_st),float(v3_st))\r\n #- K - stroke colour\r\n if v4_st==\"K\":hxstroke_st=hexcmykrgbinv(float(v0_st),float(v1_st),float(v2_st),float(v3_st))\r\n\r\n #- x - fill colour (?)\r\n if v0rev_st==\"x\":hxfill_st=hexcmykrgbinv(float(v0_st),float(v1_st),float(v2_st),float(v3_st))\r\n #- X - stroke colour (?)\r\n if v0rev_st==\"X\":hxstroke_st=hexcmykrgbinv(float(v0_st),float(v1_st),float(v2_st),float(v3_st))\r\n\r\n #- M - miter limit\r\n if v1_st==\"M\":miterlimit=float(v0_st)\r\n #- d - dashes (array and offset) and path start\r\n if v0rev_st==\"d\" and pathjoin==0: # and shapeflg==0 and rdflg==1\r\n #print\"<path id=\\\"path_\"+str(idpath)+\"\\\" d=\\\"\"\r\n #idpath+=1;shapeflg=1\r\n dasharray_st=locstrinfo(text_st,\"[\",\"]\")\r\n dasharray_st=dasharray_st.strip()\r\n dasharray_st=dasharray_st.replace(\" \",\",\")\r\n if dasharray_st==\"\":dasharray_st=\"none\"\r\n text1_st=text_st.replace(\"] \",\"]\")\r\n dashoffset=float(locstrinfo(text1_st,\"]\",\" \").strip())\r\n if dashoffset==0:dashoffset=0\r\n #- situation: 0 J 0 j 1 w 3.8636 M []0 d\r\n #- M: find 'M' (miter) position in string, get left$ until 'M' and get 2nd word as float from last\r\n if v7_st==\"M\":miterlimit=float(v6_st)\r\n\r\n #- w: find 'w' (line thickness width) in the same way of 'M' above\r\n if v5_st==\"w\":strokewidth=float(v4_st)\r\n\r\n #- XR - fill rule (what for?)\r\n\r\n #- Tp - textbox position (starts text box if not started???)\r\n if v0rev_st==\"Tp\":\r\n if shapeflg==1:\r\n print\"\\\"/>\"+\"\\n\"\r\n txboxflg=1;xtxboxpos=float(v4_st);ytxboxpos=float(v5_st);ycurlead=ytxboxpos\r\n print\"<text x=\\\"\"+str(xtxboxpos)+\"\\\" y=\\\"\"+ystupdn(str(ytxboxpos),yed_st)+\"\\\" id=\\\"tb_\"+str(idtxbox)+\"\\\" style=\\\"\\n\"\r\n idtxbox+=1\r\n #- Ta - text alignment (0 left, 1 mid, 2 right, 3 justified)\r\n if v0rev_st==\"Ta\":\r\n if int(float(v0_st))==0:\r\n print\"text-align:start;text-anchor:start;\\n\"\r\n if int(float(v0_st))==1:\r\n print\"text-align:center;text-anchor:middle;\\n\"\r\n if int(float(v0_st))==2:\r\n print\"text-align:end;text-anchor:end;\\n\"\r\n if int(float(v0_st))==3: #- i'm not sure about this one\r\n print\"text-align:start;text-anchor:start;\\n\"\r\n #- Tl - text leading (check if \" \" were replaced to \" \")\r\n if v0rev_st==\"Tl\":\r\n ysteplead=float(v0_st)\r\n #- Tf - font size\r\n if v0rev_st==\"Tf\" and fntszflg==0:\r\n print\"font-size:\"+v1_st+\"px;\\\"\"+\"\\n\"\r\n fntszflg=1\r\n #- Tx - Tj - text strings\r\n if v0rev_st==\"Tx\" or v0rev_st==\"Tj\":\r\n txboxline_st=locstrinfo(text_st,\"(\",\")\")\r\n txboxline_st=aisvgtxrepl(txboxline_st)\r\n print\"><tspan id=\\\"ts_\"+str(idtxspan)+\"\\\" x=\\\"\"+str(xtxboxpos)+\"\\\" y=\\\"\"+ystupdn(str(ycurlead),yed_st)+\"\\\"\\n\"\r\n print\">\"+txboxline_st+\"</tspan\\n\"\r\n idtxspan+=1;ycurlead-=ysteplead\r\n #- TO - ends textbox\r\n if v0_st==\"TO\":\r\n print\"></text>\\n\\n\"\r\n txboxflg=0;shapeflg=0;fntszflg=0\r\n #- gets 'begin layer' ? (rdflg=1 ?)\r\n #if v0_st==\"%AI5_BeginLayer\":rdflg=1\r\n\r\n #- LB - ensures if path ends (?)\r\n if v0_st==\"LB\" and shapeflg==1:\r\n shapeflg=0;print\"\\\"/>\\n\\n\"\r\n #- %%EOF - end of file\r\n if v0_st==\"%%EOF\":print\"</svg>\\n\"\r\n\r\n #- layer conversion - i have to clean it...\r\n #- Ln - begin layer? (beginning layer here, because layer name)\r\n #if v1_st==\"Ln\":print\"<g id=\\\"\"+v0_st+\"\\\">\\n\\n\"\r\n #- %AI5_EndLayer-- - ends layer\r\n #if v0_st==\"%AI5_EndLayer--\":print\"</g>\\n\\n\"\r\n\r\n close_me = False\r\n #- *u - starts joined pathes\r\n if v0_st==\"*u\" or v0_st==\"u\":\r\n if is_open:\r\n close_me = True\r\n is_open = True\r\n pathjoin=1;lastjpath=0\r\n #- *U - ends joined pathes\r\n if v0_st==\"*U\" or v0_st==\"U\":\r\n if is_open:\r\n close_me = True\r\n is_open = False\r\n pathjoin=0;lastjpath=0\r\n\r\n if close_me:\r\n #- s - empty shapes (from joined pathes)\r\n if lastshape_st==\"s\":\r\n shapeflg=0\r\n clzempty(hxstroke_st,str(strokewidth),str(miterlimit),dasharray_st,str(dashoffset))\r\n #- S - polylines (from joined pathes)\r\n if lastshape_st==\"S\":\r\n shapeflg=0\r\n clzpolyline(hxstroke_st,str(strokewidth),str(miterlimit),dasharray_st,str(dashoffset))\r\n #- b - filled shapes (from joined pathes)\r\n if lastshape_st==\"b\":\r\n shapeflg=0\r\n clzfilled(hxfill_st,hxstroke_st,str(strokewidth),str(miterlimit),dasharray_st,str(dashoffset))\r\n #- B - filled shapes (from joined pathes (?))\r\n if lastshape_st==\"B\":\r\n shapeflg=0\r\n clzfilled(hxfill_st,hxstroke_st,str(strokewidth),str(miterlimit),dasharray_st,str(dashoffset))\r\n #- f - solid shapes (from joined pathes)\r\n if lastshape_st==\"f\":\r\n shapeflg=0\r\n clzsolid(hxfill_st,str(strokewidth),str(miterlimit),str(dashoffset))\r\n #- F - solid shapes (from joined pathes (?))\r\n if lastshape_st==\"F\":\r\n shapeflg=0\r\n clzsolid(hxfill_st,str(strokewidth),str(miterlimit),str(dashoffset))\r\n\r\n #- lastshape values from pathjoin=1\r\n if rdflg==1 and shapeflg==1 and pathjoin==1:\r\n if v0_st==\"s\":\r\n lastshape_st=\"s\";print\"z\\n\"\r\n if pathjoin==1:lastjpath+=1\r\n if v0_st==\"S\":\r\n lastshape_st=\"S\";print\"z\\n\"\r\n if pathjoin==1:lastjpath+=1\r\n if v0_st==\"b\":\r\n lastshape_st=\"b\";print\"z\\n\"\r\n if pathjoin==1:lastjpath+=1\r\n if v0_st==\"f\":\r\n lastshape_st=\"f\";print\"z\\n\"\r\n if pathjoin==1:lastjpath+=1\r\n #- checks if shapeflg and rdflg are true and pathjoin false? (... what for?)\r\n if rdflg==1 and pathjoin==0: # and shapeflg==1\r\n #- s - empty shapes\r\n if v0_st==\"s\":\r\n shapeflg=0\r\n clzempty(hxstroke_st,str(strokewidth),str(miterlimit),dasharray_st,str(dashoffset))\r\n #- S - polylines\r\n if v0_st==\"S\":\r\n shapeflg=0\r\n clzpolyline(hxstroke_st,str(strokewidth),str(miterlimit),dasharray_st,str(dashoffset))\r\n\r\n #- '(N) *' - polylines (guides exported from Freehand?)\r\n if v0_st==\"(N)\" and v0rev_st==\"*\":\r\n shapeflg=0\r\n clzpolyline(hxstroke_st,str(strokewidth),str(miterlimit),dasharray_st,str(dashoffset))\r\n\r\n #- N - leaves path opened (rare?)\r\n if v0_st==\"N\":\r\n shapeflg=0\r\n clzpolyline(hxstroke_st,str(strokewidth),str(miterlimit),dasharray_st,str(dashoffset))\r\n\r\n #- b - filled shapes\r\n if v0_st==\"b\":\r\n shapeflg=0\r\n clzfilled(hxfill_st,hxstroke_st,str(strokewidth),str(miterlimit),dasharray_st,str(dashoffset))\r\n #- B - filled shapes (?)\r\n if v0_st==\"B\":\r\n shapeflg=0\r\n clzfilled(hxfill_st,hxstroke_st,str(strokewidth),str(miterlimit),dasharray_st,str(dashoffset))\r\n #- f - solid shapes\r\n if v0_st==\"f\":\r\n shapeflg=0\r\n clzsolid(hxfill_st,str(strokewidth),str(miterlimit),str(dashoffset))\r\n #- F - solid shapes (?)\r\n if v0_st==\"F\":\r\n shapeflg=0\r\n clzsolid(hxfill_st,str(strokewidth),str(miterlimit),str(dashoffset))\r\n\r\n #- checks if shapeflg and rdflg are true\r\n if rdflg==1 and txboxflg==0: # and shapeflg==1\r\n\r\n #- m - first coordinate node from a path (shapeflg=0?)\r\n if v2_st==\"m\": # and shapeflg=0:\r\n # supposed to 'm' starting every path?\r\n #if pathjoin==0:\r\n if pathjoin==0 or (pathjoin==1 and lastjpath==0):\r\n print \"<path id=\\\"path_\"+str(idpath)+\"\\\" d=\\\"\"\r\n idpath+=1;shapeflg=1\r\n print \"M \"+v0_st+\" \"+ystupdn(v1_st,yed_st)\r\n\r\n #- m - first coordinate node from a path (shapeflg=0)\r\n #if v2_st==\"m\" and shapeflg=0:\r\n # # supposed to 'm' starting every path?\r\n # print\"<path id=\\\"path_\"+str(idpath)+\"\\\" d=\\\"\"\r\n # idpath+=1;shapeflg=1\r\n # print\"M \"+v0_st+\" \"+ystupdn(v1_st,yed_st)\r\n\r\n #- L - straight line coordinate from the last coordinate\r\n if v2_st==\"l\":\r\n print \"L \"+v0_st+\" \"+ystupdn(v1_st,yed_st)\r\n #- C - bezier line coordinates from the last coordinate\r\n if v6_st==\"c\":\r\n print \"C \"+v0_st+\" \"+ystupdn(v1_st,yed_st)+\" \"+v2_st+\" \"+ystupdn(v3_st,yed_st)+\" \"+v4_st+\" \"+ystupdn(v5_st,yed_st)\r\n\r\nfinp_fl.close()\r\n#fout_fl.close()\r\n\r\n\r\n \t \t \r\n"
},
{
"alpha_fraction": 0.5843518972396851,
"alphanum_fraction": 0.5874075293540955,
"avg_line_length": 39.373374938964844,
"blob_id": "9918a0d0c723b9ffd4179931b97dc79a8cdc9032",
"content_id": "61f3df506fbdb857f542bb20ece9b1c7b94d4865",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 12436,
"license_type": "no_license",
"max_line_length": 127,
"num_lines": 308,
"path": "/Spikes/spikedetekt2/spikedetekt2/core/main.py",
"repo_name": "sapphire008/Python",
"src_encoding": "UTF-8",
"text": "\"\"\"Main module.\"\"\"\n\n# -----------------------------------------------------------------------------\n# Imports\n# -----------------------------------------------------------------------------\nimport spikedetekt2\nimport logging\nimport imp\nimport os.path as op\n\nimport numpy as np\nimport tables as tb\n\nfrom .progressbar import ProgressReporter\nfrom kwiklib.dataio import (BaseRawDataReader, read_raw, excerpt_step,\n to_contiguous, convert_dtype, KwdRawDataReader, ExperimentRawDataReader)\nfrom spikedetekt2.processing import (bandpass_filter, apply_filter, decimate,\n get_threshold, connected_components, extract_waveform,\n compute_pcs, project_pcs, DoubleThreshold)\nfrom kwiklib.utils import (Probe, iterkeys, debug, info, warn, exception,\n display_params, FileLogger, register, unregister)\n\n\ndef _import_module(path):\n module_name = op.basename(path)\n module_name = op.splitext(module_name)[0]\n return imp.load_source(module_name, path)\n\n\n# -----------------------------------------------------------------------------\n# Processing\n# -----------------------------------------------------------------------------\ndef apply_threshold(chunk_fil, threshold=None, **prm):\n # Determine the chunk used for thresholding.\n if prm['detect_spikes'] == 'positive':\n chunk_detect = chunk_fil\n elif prm['detect_spikes'] == 'negative':\n chunk_detect = -chunk_fil\n elif prm['detect_spikes'] == 'both':\n chunk_detect = np.abs(chunk_fil)\n\n # Perform thresholding.\n # shape: (nsamples, nchannels)\n chunk_threshold = DoubleThreshold(\n strong=chunk_detect > threshold.strong,\n weak=chunk_detect > threshold.weak,\n )\n return chunk_detect, chunk_threshold\n\ndef extract_waveforms(chunk_detect=None, threshold=None,\n chunk_fil=None, chunk_raw=None,\n probe=None, components=None,\n **prm):\n # For now, we use the same binary chunk for detection and extraction\n # +/-chunk, or abs(chunk), depending on the parameter 'detect_spikes'.\n chunk_extract = chunk_detect # shape: (nsamples, nchannels)\n # This is a list of Waveform instances.\n waveforms = []\n for component in components:\n w = extract_waveform(component,\n chunk_extract=chunk_extract,\n chunk_fil=chunk_fil,\n chunk_raw=chunk_raw,\n threshold_strong=threshold.strong,\n threshold_weak=threshold.weak,\n probe=probe,\n **prm)\n if w is not None:\n waveforms.append(w)\n\n # Remove skipped waveforms (in overlapping chunk sections).\n # waveforms = [w for w in waveforms if w is not None]\n return waveforms\n\ndef add_waveform(experiment, waveform, **prm):\n \"\"\"Add a Waveform instance to an Experiment.\"\"\"\n experiment.channel_groups[waveform.channel_group].spikes.add(\n time_samples=waveform.s_offset,\n time_fractional=waveform.s_frac_part,\n recording=waveform.recording,\n waveforms_raw=waveform.raw,\n waveforms_filtered=waveform.fil,\n masks=waveform.masks,\n )\n\ndef save_features(experiment, **prm):\n \"\"\"Compute the features from the waveforms and save them in the experiment\n dataset.\"\"\"\n nwaveforms_max = prm['pca_nwaveforms_max']\n npcs = prm['nfeatures_per_channel']\n kwik = experiment._files['kwik']\n\n for chgrp in iterkeys(experiment.channel_groups):\n spikes = experiment.channel_groups[chgrp].spikes\n # Extract a subset of the saveforms.\n nspikes = len(spikes)\n\n # We convert the extendable features_masks array to a\n # contiguous array.\n if prm.get('features_contiguous', True):\n # Make sure to update the PyTables node after the recreation,\n # to avoid ClosedNodeError.\n spikes.features_masks = to_contiguous(spikes.features_masks, nspikes=nspikes)\n else:\n warn((\"The features array has not been converted to a contiguous \"\n \"array.\"))\n\n # Skip the channel group if there are no spikes.\n if nspikes == 0:\n continue\n nwaveforms = min(nspikes, nwaveforms_max)\n step = excerpt_step(nspikes, nexcerpts=nwaveforms, excerpt_size=1)\n waveforms_subset = spikes.waveforms_filtered[::step]\n\n # With this option, PCs are directly provided in the PRM file as\n # a NumPy array\n if prm.get('canonical_pcs', None) is not None:\n pcs = prm['canonical_pcs']\n assert isinstance(pcs, np.ndarray)\n else:\n # We take the masks in order to compute the PCs only on\n # the unmasked spikes, for each channel.\n masks = spikes.features_masks[::step,::npcs,1] # (nspikes, nchannels)\n # Compute the PCs.\n pcs = compute_pcs(waveforms_subset, npcs=npcs, masks=masks)\n\n # Add PCs to the KWIK file\n kwik.createArray(experiment.channel_groups[chgrp]._node, 'pca_waveforms',\n pcs)\n\n # Project the waveforms on the PCs and compute the features.\n # WARNING: optimization: we could load and project waveforms by chunks.\n for i, waveform in enumerate(spikes.waveforms_filtered):\n # Convert waveforms from int16 to float32 with scaling\n # before computing PCA so as to avoid getting huge numbers.\n waveform = convert_dtype(waveform, np.float32)\n features = project_pcs(waveform, pcs)\n spikes.features_masks[i,:,0] = features.ravel()\n\n\n# -----------------------------------------------------------------------------\n# File logger\n# -----------------------------------------------------------------------------\ndef create_file_logger(filename):\n # global LOGGER_FILE\n LOGGER_FILE = FileLogger(filename, name='file',\n level=logging.DEBUG)\n register(LOGGER_FILE)\n return LOGGER_FILE\n\ndef close_file_logger(LOGGER_FILE):\n unregister(LOGGER_FILE)\n\n\n# -----------------------------------------------------------------------------\n# Main loop\n# -----------------------------------------------------------------------------\ndef run(raw_data=None, experiment=None, prm=None, probe=None,\n _debug=False, convert_only=False):\n \"\"\"This main function takes raw data (either as a RawReader, or a path\n to a filename, or an array) and executes the main algorithm (filtering,\n spike detection, extraction...).\"\"\"\n assert experiment is not None, (\"An Experiment instance needs to be \"\n \"provided in order to write the output.\")\n\n # Create file logger for the experiment.\n LOGGER_FILE = create_file_logger(experiment.gen_filename('log'))\n\n # Get parameters from the PRM dictionary.\n chunk_size = prm.get('chunk_size', None)\n chunk_overlap = prm.get('chunk_overlap', 0)\n nchannels = prm.get('nchannels', None)\n\n # Ensure a RawDataReader is instantiated.\n if raw_data is not None:\n if not isinstance(raw_data, BaseRawDataReader):\n raw_data = read_raw(raw_data, nchannels=nchannels)\n else:\n raw_data = read_raw(experiment)\n\n # Log.\n if convert_only:\n info(\"Starting file conversion only. Klusta version {1:s}, on {0:s}\".format((str(raw_data)), spikedetekt2.__version__))\n info(\"Running spike detection on a single chunk of spikes only, so as to have some information\")\n first_chunk_detected = False # horrible hack - detects spikes on one chunk only so KV doesn't complain\n else:\n info(\"Starting SpikeDetekt version {1:s} on {0:s}\".format((str(raw_data)), spikedetekt2.__version__))\n debug(\"Parameters: \\n\" + (display_params(prm)))\n\n # Get the bandpass filter.\n filter = bandpass_filter(**prm)\n\n if not (convert_only and first_chunk_detected):\n # Compute the strong threshold across excerpts uniformly scattered across the\n # whole recording.\n threshold = get_threshold(raw_data, filter=filter,\n channels=probe.channels, **prm)\n assert not np.isnan(threshold.weak).any()\n assert not np.isnan(threshold.strong).any()\n debug(\"Threshold: \" + str(threshold))\n\n # Debug module.\n diagnostics_path = prm.get('diagnostics_path', None)\n if diagnostics_path:\n diagnostics_mod = _import_module(diagnostics_path)\n if not hasattr(diagnostics_mod, 'diagnostics'):\n raise ValueError(\"The diagnostics module must implement a \"\n \"'diagnostics()' function.\")\n diagnostics_fun = diagnostics_mod.diagnostics\n else:\n diagnostics_fun = None\n\n\n # Progress bar.\n progress_bar = ProgressReporter(period=30.)\n nspikes = 0\n\n # Loop through all chunks with overlap.\n for chunk in raw_data.chunks(chunk_size=chunk_size,\n chunk_overlap=chunk_overlap,):\n # Log.\n debug(\"Processing chunk {0:s}...\".format(chunk))\n\n nsamples = chunk.nsamples\n rec = chunk.recording\n nrecs = chunk.nrecordings\n s_end = chunk.s_end\n\n # Filter the (full) chunk.\n chunk_raw = chunk.data_chunk_full # shape: (nsamples, nchannels)\n chunk_fil = apply_filter(chunk_raw, filter=filter)\n\n i = chunk.keep_start - chunk.s_start\n j = chunk.keep_end - chunk.s_start\n\n # Add the data to the KWD files.\n if prm.get('save_raw', False):\n # Do not append the raw data to the .kwd file if we're already reading\n # from the .kwd file.\n if not isinstance(raw_data, (KwdRawDataReader, ExperimentRawDataReader)):\n # Save raw data.\n experiment.recordings[chunk.recording].raw.append(convert_dtype(chunk.data_chunk_keep, np.int16))\n\n if prm.get('save_high', False):\n # Save high-pass filtered data: need to remove the overlapping\n # sections.\n chunk_fil_keep = chunk_fil[i:j,:]\n experiment.recordings[chunk.recording].high.append(convert_dtype(chunk_fil_keep, np.int16))\n\n if prm.get('save_low', True):\n # Save LFP.\n chunk_low = decimate(chunk_raw)\n chunk_low_keep = chunk_low[i//16:j//16,:]\n experiment.recordings[chunk.recording].low.append(convert_dtype(chunk_low_keep, np.int16))\n\n if not (convert_only and first_chunk_detected):\n # Apply thresholds.\n chunk_detect, chunk_threshold = apply_threshold(chunk_fil,\n threshold=threshold, **prm)\n\n # Remove dead channels.\n dead = np.setdiff1d(np.arange(nchannels), probe.channels)\n chunk_detect[:,dead] = 0\n chunk_threshold.strong[:,dead] = 0\n chunk_threshold.weak[:,dead] = 0\n\n # Find connected component (strong threshold). Return list of\n # Component instances.\n components = connected_components(\n chunk_strong=chunk_threshold.strong,\n chunk_weak=chunk_threshold.weak,\n probe_adjacency_list=probe.adjacency_list,\n chunk=chunk, **prm)\n\n # Now we extract the spike in each component.\n waveforms = extract_waveforms(chunk_detect=chunk_detect,\n threshold=threshold, chunk_fil=chunk_fil, chunk_raw=chunk_raw,\n probe=probe, components=components, **prm)\n\n # DEBUG module.\n # Execute the debug script.\n if diagnostics_fun:\n try:\n diagnostics_fun(**locals())\n except Exception as e:\n warn(\"The diagnostics module failed: \" + e.message)\n\n # Log number of spikes in the chunk.\n nspikes += len(waveforms)\n\n # We sort waveforms by increasing order of fractional time.\n [add_waveform(experiment, waveform) for waveform in sorted(waveforms)]\n\n first_chunk_detected = True\n\n # Update the progress bar.\n progress_bar.update(rec/float(nrecs) + (float(s_end) / (nsamples*nrecs)),\n '%d spikes found.' % (nspikes))\n\n # DEBUG: keep only the first shank.\n if _debug:\n break\n\n # Feature extraction.\n save_features(experiment, **prm)\n\n close_file_logger(LOGGER_FILE)\n progress_bar.finish()\n\n"
},
{
"alpha_fraction": 0.6669161915779114,
"alphanum_fraction": 0.7023453116416931,
"avg_line_length": 36.92232894897461,
"blob_id": "3800e2f96564aa9d85faeb1b157c3178ec27bd27",
"content_id": "4e1e3ee8b9ae35a53ad3f652913f07ff32602e71",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 8016,
"license_type": "no_license",
"max_line_length": 109,
"num_lines": 206,
"path": "/python_tutorials/PythonForDataAnalysis/Chapter_12_numpy_part2.py",
"repo_name": "sapphire008/Python",
"src_encoding": "UTF-8",
"text": "# -*- coding: utf-8 -*-\r\n\"\"\"\r\nCreated on Mon May 12 02:45:05 2014\r\nModified on Thur May 22 12:12:05 2014\r\n@author: Edward\r\n\"\"\"\r\n\r\nimport numpy as np\r\nfrom numpy.random import randn\r\n\r\n# Reshape\r\narr = np.arange(8)\r\narr.reshape((4,2))\r\narr.reshape((4,2)).reshape((2,4))\r\n#can specify only one dimension, and let the program infer the other dimension\r\n#by passing a -1\r\narr = np.arange(15)\r\narr.reshape((5,-1))\r\n\r\n# falttening / raveling\r\narr = np.arange(15).reshape((5,3))\r\narr.ravel() # does not copy data\r\narr.flatten()\r\n\r\n# row major vs. column major: C vs. Fortran data order\r\n# row major: data of each row are stored adjacently in the memory locations; \r\n# --> rule used by C, and default of numpy\r\n# column major: data of each column are stored adjacently in the memory\r\n# locations; --> rule used by Fortran, and default of MATLAB\r\narr = np.arange(12).reshape((3,4))\r\narr.ravel() # taking by rows, taking higher dimensions first, then to axis 0\r\n#vs\r\narr.ravel('F') #taking by columns, taking lower dimensions first\r\n# to set specific orders, set order when assinging the varibable\r\narr_c = np.ones((1000,1000), order = 'C')\r\narr_c.flags #check variable attribute\r\narr_f = np.ones((1000,1000), order = 'F')\r\narr_f.flags #check variable attribute\r\n# to make a copy of the array with converted order\r\nnew_arr_c = arr_f.copy('C')\r\nnew_arr_c.flags\r\n# In general, continguous data performs better than non-contiguous data.\r\n# For instance, to sum the aray over the rows, C_contiguous data runs faster\r\n# than F_contiguous data\r\n\r\n# concatenation and splitting\r\narr1 = np.array([[1,2,3],[4,5,6]])\r\narr2 = np.array([[7,8,9],[10,11,12]])\r\nnp.concatenate([arr1,arr2],axis=0) # concatenate along first dimension / rows\r\n# alternatively\r\nnp.vstack((arr1,arr2)) #vertically stack\r\nnp.column_stack((arr1,arr2))\r\nnp.concatenate([arr1,arr2],axis=1) # concatenate along second dimension /columns\r\n# alternatively\r\nnp.hstack((arr1,arr2)) #horizontally stack\r\nnp.row_stack((arr1,arr2))\r\n# splitting arrays\r\narr = randn(5,2)\r\nfirst, second, third = np.split(arr, [1,3])\r\n# stacking helpers: r_ and c_\r\narr1 = np.arange(6).reshape((3,2))\r\narr2 = randn(3,2)\r\nnp.r_[arr1,arr2] # row concatenation\r\nnp.c_[np.r_[arr1,arr2],np.arange(6)] #column concatenation\r\nnp.c_[1:6,-10:-5]#translate slices/MATLAB-like vector notation into arrays\r\n\r\n# repeating notations: tile and repeat\r\narr = np.arange(3)\r\narr.repeat(3) # repeat each element 3 times, concatenate each repeats along the row (over columns)\r\n#array([0, 0, 0, 1, 1, 1, 2, 2, 2]) --> different from MATLAB's repmat\r\n#can specify how many times each element repeat\r\narr.repeat([2,3,4])# repeat the 1st element twice, 2nd three times, and 3rd four times\r\n# can repeat along a particular axis\r\narr = randn(2,2)\r\narr.repeat(2,axis=0) #repeaet along rows\r\n# tile is the same as MATLAB's repatm, which stacks copies of the array\r\nnp.tile(arr,2) #default along the row/axis=0\r\n# much like repmat, can specify the layout of the repeats\r\nnp.tile(arr,(3,2))#repeat 3 times along the row, and twice along the column\r\n\r\n# Fancy indexing equivalents: take and put\r\narr = np.arange(10) * 100\r\ninds = [7, 1, 2, 6]\r\narr[inds] # fancy indexing\r\narr.put(inds, [40,41,42,43])#put [40, 41, 42,43] in place of arr[inds], one by one\r\n# this method does not return anything, but will modify the array arr\r\narr.take(inds,axis=0) # get the elements of the array at the indices.\r\n# This is the same as arr[ind]. Performance for each is relatively the same,\r\n# but take may be slightly better (10s~20s of nanoseconds)\r\n\r\n# Broadcasting\r\narr = randn(4,3)\r\narr.mean(0)#take the mean along axis=0\r\ndmeaned = arr - arr.mean(0) #broadcast along the rows, so that from each element,\r\n# the mean of its respective column is removed\r\ndmeaned = arr - arr.mean(1).reshape((4,1)) #broadcast along the columns, so that from each element,\r\n# the mean of its respective row is removed. Note that we have to reshape\r\n# the array into a volumn vector to allow broadcasting to occur. Tranpose will\r\n# not work!\r\n# adding new axis: np.newaxis\r\narr = np.zeros((4,4))\r\narr_3rd = arr[:, np.newaxis, :]\r\narr_1d = np.random.normal(size=3)\r\narr_1d[:,np.newaxis] # transpose row vector into column vector\r\n# suppose we have a 3D array, and we want to demean axis=2 (3rd dimension)\r\narr = randn(3,4,5)\r\ndemeaned = arr - arr.mean(2)[:,:,np.newaxis]\r\n# The following function generalize demean procedue without sacraficing performance\r\ndef demean_axis(arr, axis=0):\r\n # demean without sacraficing performance\r\n means = arr.mean(axis) # get the mean to be removed\r\n #This generalized things like [:,:,np.newaxis] to N dimensions\r\n indexer = [slice(None)] * arr.ndim #create slicer object with the same dimension as arr\r\n indexer[axis] = np.newaxis #set the axis at which demean needs to be performed to np.newaxis\r\n return arr - means[indexer]#demean\r\n\r\n# ufunc\r\n\r\n# reduce: aggregate values by successive applications of operation\r\n# reduceat(x,bins): local reduce or group by.\r\narr = np.arange(10)\r\nnp.add.reduce(arr) # aggregates values, along an axis\r\n# equivalent to \r\nnp.sum(arr)\r\narr = randn(5,5)\r\narr[::2].sort(1) # sort row 0 to row 2\r\narr[:,:-1] < arr[:,1:] # for each row, compare if the last numbers are smaller the first numbers, i.e. sorted\r\nnp.logical_and.reduce(arr[:,:-1]<arr[:,1:],axis=1)\r\n\r\n# accumulate: aggregate values, preserving all partial aggregates\r\narr = np.arange(15).reshape((3,5))\r\n#accumulate: produce intermediate accumulated values, comparable to cumsum\r\nnp.add.accumulate(arr,axis=1)\r\n\r\n# outer: apply operations to all pairs of elements in x and y. Result array has shape x.shape + y.shape\r\narr = np.arange(3).repeat([1,2,2])\r\nnp.multiply.outer(arr,np.arange(5))\r\nresult = np.subtract.outer(randn(3,4), randn(5))\r\n# result of outer will have the dimension the sum of the inputs\r\n# note that the sum of tuples (which wha shapes are) are simly concatenating\r\n# the tuples\r\nresult.shape\r\n#(3,4,5)\r\n\r\n# custom ufuncs: useful but slower than numpy's C based library\r\ndef add_element(x,y):\r\n return x+y\r\n\r\nadd_them = np.frompyfunc(add_element, 2,1)\r\nadd_them(np.arange(8),np.arange(8))#returns python object\r\n\r\nadd_them = np.vectorize(add_element, otypes=[np.float64])\r\nadd_them(np.arange(8),np.arange(8)) #return array\r\n\r\n# Sorting\r\narr = randn(3,5)\r\narr[:,0].sort() # sort first column values, original modified\r\nnp.sort(arr) # creates a new copy\r\nnp.sort(arr)[:,::-1]# sort in descending order\r\n\r\n# argsort and lexsort\r\n# returns indices of the array after sorting\r\nvalues = np.array([5,0,1,3,2])\r\nindexer = values.argsort()\r\n#example: reorder 2D array by its first row\r\narr = randn(3,5)\r\narr[0] = values\r\narr[:,arr[0].argsort()]\r\n\r\n# lexsort: performs lexicographical sort, sort in paris\r\n# sort last array passed first, then go backwards\r\nfirst_name = np.array(['Bob','Jane','Steve','Bill','Barbra'])\r\nlast_name = np.array(['Jone','Arnold','Arnold','Jones','Walters'])\r\nsorter = np.lexsort((first_name, last_name))\r\nzip(last_name[sorter],first_name[sorter])\r\n\r\n# alternate sort algorithms\r\n# stable sort\r\nvalues = np.array(['2:first','2:second','1:first','1:second','1:third'])\r\nkey = np.array([2,2,1,1,1])\r\nindexer = key.argsort(kind='mergesort')\r\nvalues.take(indexer)\r\n# find elements in sorted array\r\narr = np.array([0, 1, 7, 12, 15])\r\n# searchsorted: perform binary search on sorted array, return index where\r\n# the value passed onto the searchsorted method need to be in order to maintain\r\n# the sort\r\narr.searchsorted(9)\r\n\r\n# NumPy Matrix Class\r\nXm = np.matrix(randn(5,5))\r\nYm = Xm[:, 0]\r\nYm.T * Xm *Ym # multiplication\r\nXm.I * Xm # inverse multiplication\r\n\r\n# Memory-mapped files: handling files that are too large to be loaded to the RAM\r\nfilename = 'mymap'\r\n#create a memory mapped file called mymap\r\nmmap = np.memmap(filename,dtype = 'float32',mode = 'w+', shape=(10000,10000))\r\n#take a slice\r\nsection = mmap[:5]\r\n#assign values to the slice\r\nsection[:] = np.random.randn(5, 10000)\r\nmmap.flush # this will write the slice onto the disk\r\n\r\n# This concludes today's study"
},
{
"alpha_fraction": 0.39466047286987305,
"alphanum_fraction": 0.4474753439426422,
"avg_line_length": 34.70833206176758,
"blob_id": "debcd6871dc48b7b5442cf847f4b9204b29e9d5c",
"content_id": "6eef76f3cd83a44dfb7ea61d3f3bf246fa0a3816",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 1723,
"license_type": "no_license",
"max_line_length": 79,
"num_lines": 48,
"path": "/Spikes/spikedetekt2/spikedetekt2/processing/tests/test_pca.py",
"repo_name": "sapphire008/Python",
"src_encoding": "UTF-8",
"text": "\"\"\"PCA tests.\"\"\"\n\n# -----------------------------------------------------------------------------\n# Imports\n# -----------------------------------------------------------------------------\nimport numpy as np\nfrom scipy import signal\nfrom spikedetekt2.processing import compute_pcs, project_pcs\n\n\n# -----------------------------------------------------------------------------\n# PCA tests\n# -----------------------------------------------------------------------------\ndef test_compute_pcs():\n \"\"\"Test PCA on a 2D array.\"\"\"\n # Horizontal ellipsoid.\n x = np.random.randn(20000, 2) * np.array([[10., 1.]])\n # Rotate the points by pi/4.\n a = 1./np.sqrt(2.)\n rot = np.array([[a, -a], [a, a]])\n x = np.dot(x, rot)\n # Compute the PCs.\n pcs = compute_pcs(x)\n assert pcs.ndim == 2\n assert (np.abs(pcs) - a).max() < 1e-2\n \ndef test_compute_pcs_3d():\n \"\"\"Test PCA on a 3D array.\"\"\"\n x1 = np.random.randn(20000, 2) * np.array([[10., 1.]])\n x2 = np.random.randn(20000, 2) * np.array([[1., 10.]])\n x = np.dstack((x1, x2))\n # Compute the PCs.\n pcs = compute_pcs(x)\n assert pcs.ndim == 3\n assert np.linalg.norm(pcs[0,:,0] - np.array([-1., 0.])) < 1e-2\n assert np.linalg.norm(pcs[1,:,0] - np.array([0., -1.])) < 1e-2\n assert np.linalg.norm(pcs[0,:,1] - np.array([0, 1.])) < 1e-2\n assert np.linalg.norm(pcs[1,:,1] - np.array([-1., 0.])) < 1e-2\n \ndef test_project_pcs():\n x1 = np.random.randn(20000, 2) * np.array([[10., 1.]])\n x2 = np.random.randn(20000, 2) * np.array([[1., 10.]])\n x = np.dstack((x1, x2))\n # Compute the PCs.\n pcs = compute_pcs(x)\n # Project the PCs.\n x_proj = project_pcs(x[0,...], pcs)\n assert x_proj.shape == (2, 2)\n \n "
},
{
"alpha_fraction": 0.6710824966430664,
"alphanum_fraction": 0.6941710710525513,
"avg_line_length": 29.452381134033203,
"blob_id": "1fca96cffc6a1fe5e1dfd942f592e900fddc39e0",
"content_id": "f1034afd20df08c1d29fa4a0fe2b550aad1f6c18",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 2642,
"license_type": "no_license",
"max_line_length": 103,
"num_lines": 84,
"path": "/PySynapse/archive/flow_chart_basic.py",
"repo_name": "sapphire008/Python",
"src_encoding": "UTF-8",
"text": "# -*- coding: utf-8 -*-\r\n\"\"\"\r\nThis example demonstrates a very basic use of flowcharts: filter data,\r\ndisplaying both the input and output of the filter. The behavior of\r\nhe filter can be reprogrammed by the user.\r\n\r\nBasic steps are:\r\n - create a flowchart and two plots\r\n - input noisy data to the flowchart\r\n - flowchart connects data to the first plot, where it is displayed\r\n - add a gaussian filter to lowpass the data, then display it in the second plot.\r\n\"\"\"\r\n# import initExample ## Add path to library (just for examples; you do not need this)\r\n\r\n\r\nfrom pyqtgraph.flowchart import Flowchart\r\nfrom pyqtgraph.Qt import QtGui, QtCore\r\nimport pyqtgraph as pg\r\nimport numpy as np\r\nimport pyqtgraph.metaarray as metaarray\r\n\r\napp = QtGui.QApplication([])\r\n\r\n## Create main window with grid layout\r\nwin = QtGui.QMainWindow()\r\nwin.setWindowTitle('pyqtgraph example: Flowchart')\r\ncw = QtGui.QWidget()\r\nwin.setCentralWidget(cw)\r\nlayout = QtGui.QGridLayout()\r\ncw.setLayout(layout)\r\n\r\n## Create flowchart, define input/output terminals\r\nfc = Flowchart(terminals={\r\n 'dataIn': {'io': 'in'},\r\n 'dataOut': {'io': 'out'}\r\n})\r\nw = fc.widget()\r\n\r\n## Add flowchart control panel to the main window\r\nlayout.addWidget(fc.widget(), 0, 0, 2, 1)\r\n\r\n## Add two plot widgets\r\npw1 = pg.PlotWidget()\r\npw2 = pg.PlotWidget()\r\nlayout.addWidget(pw1, 0, 1)\r\nlayout.addWidget(pw2, 1, 1)\r\n\r\nwin.show()\r\n\r\n## generate signal data to pass through the flowchart\r\ndata = np.random.normal(size=1000)\r\ndata[200:300] += 1\r\ndata += np.sin(np.linspace(0, 100, 1000))\r\ndata = metaarray.MetaArray(data, info=[{'name': 'Time', 'values': np.linspace(0, 1.0, len(data))}, {}])\r\n\r\n## Feed data into the input terminal of the flowchart\r\nfc.setInput(dataIn=data)\r\n\r\n## populate the flowchart with a basic set of processing nodes.\r\n## (usually we let the user do this)\r\nplotList = {'Top Plot': pw1, 'Bottom Plot': pw2}\r\n\r\npw1Node = fc.createNode('PlotWidget', pos=(0, -150))\r\npw1Node.setPlotList(plotList)\r\npw1Node.setPlot(pw1)\r\n\r\npw2Node = fc.createNode('PlotWidget', pos=(150, -150))\r\npw2Node.setPlot(pw2)\r\npw2Node.setPlotList(plotList)\r\n\r\nfNode = fc.createNode('GaussianFilter', pos=(0, 0))\r\nfNode.ctrls['sigma'].setValue(5)\r\nfc.connectTerminals(fc['dataIn'], fNode['In'])\r\nfc.connectTerminals(fc['dataIn'], pw1Node['In'])\r\nfc.connectTerminals(fNode['Out'], pw2Node['In'])\r\nfc.connectTerminals(fNode['Out'], fc['dataOut'])\r\n\r\n\r\n\r\n## Start Qt event loop unless running in interactive mode or using pyside.\r\nif __name__ == '__main__':\r\n import sys\r\n if (sys.flags.interactive != 1) or not hasattr(QtCore, 'PYQT_VERSION'):\r\n QtGui.QApplication.instance().exec_()\r\n"
}
] | 130 |
dkruijs/sunlight-theme-switcher
|
https://github.com/dkruijs/sunlight-theme-switcher
|
e47851bc67bd4543afed5ce3c08b204723e9197a
|
867076bea55061830b584b153dac156e5b826495
|
692440437f3597fd1144bddbf4f09c0057abebd3
|
refs/heads/master
| 2023-04-26T21:59:04.366931 | 2021-05-14T16:32:38 | 2021-05-14T16:32:38 | 268,793,462 | 0 | 0 | null | null | null | null | null |
[
{
"alpha_fraction": 0.7064017653465271,
"alphanum_fraction": 0.7395143508911133,
"avg_line_length": 46.73684310913086,
"blob_id": "32b6fdcd9f705d3e4d19db718fc6a32d3ffec8d7",
"content_id": "1c1c20667c79314dfdd8ed8517a2d04059eae442",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Markdown",
"length_bytes": 906,
"license_type": "no_license",
"max_line_length": 173,
"num_lines": 19,
"path": "/README.md",
"repo_name": "dkruijs/sunlight-theme-switcher",
"src_encoding": "UTF-8",
"text": "Gnome 3 daylight dark/light theme switcher\n---\n\n### Setting up \nCreate a recurring anacron job as follows (by adding it to `/etc/cron.{daily|weekly|monthly}`): \n```\n# period delay job-identifier command\n3 5 daylight-theme-switcher python -c 'from theme_switcher import update_sunrise_sunset; update_sunrise_sunset()'\n```\n(or use some custom cron job if desired)\n\n### How it works\nThis job will periodically update your crontab with a job that automatically switches dark and light themes depending on sunrise/sunset times, such as the following example:\n```\n# m h dom mon dow user command\n45 5 * * 0 python -c 'from theme_switcher import set_light_theme; set_light_theme()' # sunrise @ 05:45\n28 21 * * 0 python -c 'from theme_switcher import set_dark_theme; set_dark_theme()' # sunset @ 21:28\n```\nUsing only pure python and GTK commands. Tested on Ubuntu 20.04.2 LTS with GNOME 3 desktop version 3.36.8."
},
{
"alpha_fraction": 0.6295331716537476,
"alphanum_fraction": 0.6327389478683472,
"avg_line_length": 30.987178802490234,
"blob_id": "62c8f95d8ecea53be7c2df02055ea3c0c1b71215",
"content_id": "7f425df2d32919b06b9ca879cdbc8523eacec54b",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 4991,
"license_type": "no_license",
"max_line_length": 140,
"num_lines": 156,
"path": "/sunlight_theme_switcher.py",
"repo_name": "dkruijs/sunlight-theme-switcher",
"src_encoding": "UTF-8",
"text": "#!/usr/bin/python\nimport subprocess\nfrom dateutil import tz\n\nfrom crontab import CronTab \nfrom suntime import Sun, SunTimeException\nimport argparse\n\nUSER = 'daan'\nSCRIPT_PATH = '~/scripts/sunlight_theme_switcher.py'\n\nLIGHT_THEME = 'Yaru'\nDARK_THEME = 'Adwaita-dark'\n\n# Amsterdam\nLATITUDE = 52.37\nLONGITUDE = 4.90\n\ndef find_matching_jobs(cron, submatch, user=True):\n gen = cron.find_command(submatch)\n entries_found = False\n\n entries = []\n for entry in gen:\n entries_found = True\n entries.append(entry)\n\n if not entries_found:\n print('Found no matching cron entries.')\n\n return entries\n\ndef remove_crontab_jobs(user=True):\n \"\"\"Use this function to remove all crontab jobs generated by this script.\n \"\"\"\n cron = CronTab(user=user)\n entries = find_matching_jobs(cron, 'theme_switcher.py', user=user)\n\n if len(entries) < 1:\n print('Found no matching cron entries, none removed.')\n\n for entry in entries:\n print('Remove entry:')\n print(entry)\n cron.remove(entry)\n\n cron.write()\n\ndef update_crontab(sunrise, sunset, user=True):\n cron = CronTab(user=user)\n existing_entries = find_matching_jobs(cron, SCRIPT_PATH, user=user)\n \n if len(existing_entries) > 0:\n # Do some sanity checks \n assert len(existing_entries) == 2, 'Wrong number of `sunlight_theme_switcher.py` entries found.'\n assert 'Sunrise @' in existing_entries[0].comment, \"Comment for sunrise job didn't match expected format.\" \n assert 'Sunset @' in existing_entries[1].comment, \"Comment for sunrise job didn't match expected format.\"\n\n # OK, update existing jobs\n set_light_theme_job = existing_entries[0]\n set_dark_theme_job = existing_entries[1]\n\n set_light_theme_job.hour.on(sunrise.hour) \n set_light_theme_job.minute.on(sunrise.minute)\n\n set_dark_theme_job.hour.on(sunset.hour) \n set_dark_theme_job.minute.on(sunset.minute)\n\n for entry in cron:\n print(entry)\n\n else:\n\n set_light_theme_job = cron.new(command=f'python3 {SCRIPT_PATH} --mode light-mode', \n comment=f'Sunrise @ {sunrise.strftime(\"%H:%M\")}')\n set_light_theme_job.hour.on(sunrise.hour) \n set_light_theme_job.minute.on(sunrise.minute) \n \n set_dark_theme_job = cron.new(command=f'python3 {SCRIPT_PATH} --mode dark-mode', \n comment=f'Sunset @ {sunset.strftime(\"%H:%M\")}')\n set_dark_theme_job.hour.on(sunset.hour) \n set_dark_theme_job.minute.on(sunset.minute) \n \n if not set_dark_theme_job.is_valid():\n raise 'Cron job set_dark_theme_job found invalid.'\n if not set_light_theme_job.is_valid():\n raise 'Cron job set_light_theme_job found invalid.' \n\n for entry in cron:\n print(entry)\n\n cron.write()\n\ndef retrieve_sunrise_sunset():\n \"\"\"Uses the `suntime` library to retrieve sunrise and sunset times \n for the provided latitude/longitude coordinates in the local time zone.\n\n Returns:\n tuple: sunrise time (datetime object), sunset time (datetime object)\n \"\"\"\n\n # Auto-detect local timezone\n to_zone = tz.tzlocal()\n\n try: \n sun = Sun(LATITUDE, LONGITUDE)\n\n # Get today's sunrise and sunset in UTC and convert to local time zone\n today_sunrise = sun.get_sunrise_time().astimezone(to_zone)\n today_sunset = sun.get_sunset_time().astimezone(to_zone)\n\n except SunTimeException as e:\n print(f\"Couldn't get sunrise/sunset times: {e}\")\n\n return today_sunrise, today_sunset \n\ndef set_theme(theme_name):\n cmd = 'gsettings set org.gnome.desktop.interface gtk-theme ' + theme_name\n subprocess.run(cmd, shell=True)\n\ndef set_light_theme():\n set_theme(LIGHT_THEME)\n\ndef set_dark_theme():\n set_theme(DARK_THEME)\n\n\nif __name__ == '__main__':\n parser = argparse.ArgumentParser()\n help_statement = \"\"\"\"Choose run mode: \n - 'light-mode' to switch to the designated 'light' GTK theme;\n - 'dark-mode' to switch to the designated 'dark' GTK theme;\n - 'upsert' to insert or update cron commands; \n - 'reset' to remove all cron commands.\n \"\"\"\n parser.add_argument(\"--mode\", help=help_statement)\n args = parser.parse_args()\n\n if args.mode == 'light-mode':\n set_light_theme()\n \n elif args.mode == 'dark-mode':\n set_dark_theme()\n\n elif args.mode == 'upsert':\n sunrise, sunset = retrieve_sunrise_sunset()\n print(f'Retrieved Amsterdam sunrise time of {sunrise.strftime(\"%H:%M\")} and sunset time of {sunset.strftime(\"%H:%M\")} (local time)')\n\n print('New crontab commands:')\n update_crontab(sunrise=sunrise, sunset=sunset, user='daan')\n \n elif args.mode == 'reset':\n remove_crontab_jobs(user='daan')\n \n else:\n print(\"Did not receive valid '--mode' argument of 'light-mode', 'dark-mode', 'upsert' or 'reset', exiting.\")\n\n"
}
] | 2 |
zapemil/spaceship
|
https://github.com/zapemil/spaceship
|
00f4f8129c1413d845eab6349e7e24465176c005
|
74f5f7cc13d2c057a8b8da12f9f6f0d2a6d900c2
|
1f02d8ea1c1d47db4fdb3c11153354d2119a3d5f
|
refs/heads/master
| 2021-01-19T17:31:50.687250 | 2017-04-27T07:42:21 | 2017-04-27T07:42:21 | 88,326,769 | 0 | 0 | null | null | null | null | null |
[
{
"alpha_fraction": 0.7317073345184326,
"alphanum_fraction": 0.7317073345184326,
"avg_line_length": 9.25,
"blob_id": "285343119976dc3efdb4c34a972abfd7b03b6209",
"content_id": "2bac3dce2ce5bedaee883497198bd6b7a3c7c175",
"detected_licenses": [
"MIT"
],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 166,
"license_type": "permissive",
"max_line_length": 30,
"num_lines": 16,
"path": "/imports.py",
"repo_name": "zapemil/spaceship",
"src_encoding": "UTF-8",
"text": "# He aquí todos los \"import\".\nimport pygame\n\nimport sys\n\nimport math\n\nimport random\n\nimport os\n\nfrom pygame.locals import*\n\npygame.init()\n\n# Segunda versión Git.\n"
},
{
"alpha_fraction": 0.4856269061565399,
"alphanum_fraction": 0.5115188360214233,
"avg_line_length": 27.132183074951172,
"blob_id": "0f0921c1c0658e3f0a4dd247058fd446c17ab5d7",
"content_id": "77ec91474de8ceec55bcebf9205282cd2276fced",
"detected_licenses": [
"MIT"
],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 4905,
"license_type": "permissive",
"max_line_length": 121,
"num_lines": 174,
"path": "/ship_test.py",
"repo_name": "zapemil/spaceship",
"src_encoding": "UTF-8",
"text": "\"My sample game!\"\n\n\nfrom stuff import*\n\n# # # # # # # # # # # # # # # # # # # # # # # # # #\n # # # # # # # # # # # # # # # # # # # # # # # # # #\n# # # # # # # # # # # # # # # # # # # # # # # # # #\n # # # # # # # # # # # # # # # # # # # # # # # # # #\n\n#version branch\n\nsize = (800,600)\nscreen = pygame.display.set_mode(size, 32)\n\nship = Ship(size)\nshot_L = []\nexplosion_L = []\nenemy_L = []\nenemyshot_L = []\nstar_L = []\n\nboom_s = pygame.mixer.Sound(\"boom.wav\")\nshot_s = pygame.mixer.Sound(\"shot.wav\")\nhighscore_s = pygame.mixer.Sound(\"highscore.wav\")\n\n\nwait = 0\ntime=0\n\ngameover = pygame.image.load(\"gameover.gif\")\n\ngameover_r = gameover.get_rect()\ngameover_r = gameover_r.move([size[0]/2-gameover_r.width/2, size[1]/2-gameover_r.height/2])\n\nscore = 0\n\npygame.mouse.set_visible(False)\n\nsong = pygame.mixer.Sound(\"technotris.wav\")\nsong.play(-1)\n\nfor star in range(100):\n star_L.append(Star(size))\n\n\n################## START #################\n\nwhile 1:\n time+=1\n keys = pygame.key.get_pressed()\n\n #restarts when you die and then press enter\n if ship==None and keys[K_RETURN]:\n song.play(-1)\n score=0\n ship=Ship(size)\n wait = 0\n time=1\n shot_L = []\n explosion_L = []\n enemy_L = []\n\n\n #checks if the ship should be shooting\n if keys[K_SPACE] and not wait and ship!=None:\n shot_L.append(Shot(ship.rect.center, 'right'))\n shot_s.play()\n wait=True\n \n if not keys[K_SPACE]: wait = False\n\n\n #creates a new alien \n if time%(1000000/(score+1)+1)==0:\n if random.randint(0,1)==0: enemy_L.append(Enemy2(list((size[0]+100,random.randint(50,size[1]-50))), enemyshot_L))\n elif random.randint(0,1)==0: enemy_L.append(Enemy3(list((size[0]+100,random.randint(50,size[1]-50)))))\n else: enemy_L.append(Enemy1([size[0]+100,random.randint(50,size[1]-50)]))\n if time%100==0: enemy_L.append(Enemy1([size[0]+100,random.randint(50,size[1]-50)]))\n\n \n #updates the shots\n for shot in shot_L:\n shot.update(screen)\n if shot.rect[0]>size[0]:\n shot_L.remove(shot)\n\n #updates the explosions\n for boom in explosion_L:\n boom.update(screen)\n if boom.life<=0:\n explosion_L.remove(boom)\n\n #updates the enemys\n for enemy in enemy_L:\n \n enemy.update(screen, time, ship)\n\n #blows up the ship if it runs into a alien\n if ship!=None and enemy.rect.colliderect(ship.rect) :\n pygame.time.wait(50)\n screen.fill([255,0,0])\n\n info = open(\"highscore.spacegame\",\"r\")\n highscore = int(info.readline())\n\n if score-1>highscore:\n highscore_s.play()\n info = open(\"highscore.spacegame\",\"w\")\n info.write(str(score))\n \n explosion_L.append(Explosion(ship_pos))\n ship = None\n enemy_L.remove(enemy)\n boom_s.play()\n \n # deletes an enemy once it's left the screen\n if enemy.rect.left<-50:\n enemy_L.remove(enemy)\n if ship!=None: score-=25\n \n #checks if the alien has been shot\n for shot in shot_L:\n if shot.rect.colliderect(enemy.rect):\n explosion_L.append(Explosion(enemy.rect.center))\n boom_s.play()\n try:\n enemy_L.remove(enemy)\n except:\n pass\n shot_L.remove(shot)\n \n if ship!=None: score+=100\n\n #decides if the ship has been blow up, then updates\n if ship !=None:\n ship.update(screen,keys,size)\n ship_pos = ship.rect.topleft\n else:\n screen.blit(gameover, gameover_r)\n song.stop()\n\n #updates stars\n for star in star_L:\n star.update(screen)\n\n #updates enemy's shots, checks if it hit the ship\n for shot in enemyshot_L:\n shot.update(screen)\n if ship!= None and shot.rect.colliderect(ship.rect):\n enemyshot_L.remove(shot)\n explosion_L.append(Explosion(ship_pos))\n ship = None\n boom_s.play()\n \n\n #scoring system\n score = max(0, score)\n\n score_t = pygame.font.SysFont(\"arial\", 20).render('score - '+str(score), False, (255,255,255))\n score_t_r = score_t.get_rect()\n score_t_r = score_t_r.move([size[0]/2-score_t_r.width/2, size[1]-size[1]/10])\n\n screen.blit(score_t, score_t_r)\n\n #updates the screen\n pygame.display.flip()\n screen.fill([0,0,0])\n\n\n # === ANTI-CRASH ===\n for event in pygame.event.get():\n if event.type == QUIT or keys[K_ESCAPE]:\n pygame.quit(); sys.exit()\n\n \n"
},
{
"alpha_fraction": 0.5332525372505188,
"alphanum_fraction": 0.5608240962028503,
"avg_line_length": 29.279815673828125,
"blob_id": "d1a3808ef7869a4eb3bac0d9fabc0c357e944a33",
"content_id": "d81ca55671fdf3896c7c0970885e2e9aaea0e240",
"detected_licenses": [
"MIT"
],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 6601,
"license_type": "permissive",
"max_line_length": 128,
"num_lines": 218,
"path": "/stuff.py",
"repo_name": "zapemil/spaceship",
"src_encoding": "UTF-8",
"text": "from imports import*\nclass Shot(pygame.sprite.Sprite):\n def __init__(self, pos, direction):\n pygame.sprite.Sprite.__init__(self)\n \n self.image=pygame.Surface([6,6])\n pygame.draw.circle(self.image, [255,255,255],[3,3],3)\n self.image.set_colorkey([0,0,0])\n\n self.rect = self.image.get_rect()\n self.rect = self.rect.move(pos)\n\n self.direction = direction\n\n def update(self, screen):\n if self.direction=='right': self.rect = self.rect.move(5,0)\n else:self.rect = self.rect.move(-5,0)\n screen.blit(self.image, self.rect)\n\n\n\nclass Ship(pygame.sprite.Sprite):\n def __init__(self, size):\n pygame.sprite.Sprite.__init__(self)\n \n self.image = pygame.image.load(\"ship.gif\")\n self.rect = self.image.get_rect()\n\n self.rect = self.rect.move(size[0]/8,size[1]/2)\n\n self.speed = 4\n\n def update(self, screen, keys, size):\n \n #move\n if keys[K_UP]:\n self.rect = self.rect.move(0,-self.speed)\n if keys[K_DOWN]:\n self.rect = self.rect.move(0,self.speed)\n if keys[K_LEFT]:\n self.rect = self.rect.move(-self.speed,0)\n if keys[K_RIGHT]:\n self.rect = self.rect.move(self.speed,0)\n\n #bounce\n if self.rect.top<0:\n self.rect = self.rect.move(0,self.speed)\n if self.rect.bottom>size[1]:\n self.rect = self.rect.move(0,-self.speed)\n if self.rect.left<0:\n self.rect = self.rect.move(self.speed,0)\n if self.rect.right>size[0]:\n self.rect = self.rect.move(-self.speed,0)\n\n #render\n screen.blit(self.image, self.rect)\n\n\n\nclass Enemy1(pygame.sprite.Sprite):\n def __init__(self, pos):\n pygame.sprite.Sprite.__init__(self)\n \n self.image = pygame.image.load(\"enemy1.gif\")\n self.rect = self.image.get_rect()\n\n self.rect = self.rect.move(pos)\n\n self.speed = random.randint(-2,-1)\n\n def update(self, screen, time, ship):\n\n self.rect = self.rect.move([self.speed,0])\n screen.blit(self.image, self.rect)\n\nclass Enemy2(pygame.sprite.Sprite):\n def __init__(self, pos, shot_list):\n pygame.sprite.Sprite.__init__(self)\n \n self.image = pygame.image.load(\"enemy2.gif\")\n self.rect = self.image.get_rect()\n\n self.rect = self.rect.move(pos)\n\n self.speed = random.randint(-2,-1)\n\n self.shot_list = shot_list\n\n def update(self, screen, time, ship):\n if ship != None:\n\n if abs(ship.rect.center[1]-self.rect.center[1])<10 and time%20==0:\n self.shot_list.append(Shot(self.rect.center, 'left'))\n\n if time%2:\n if ship.rect.center[1]-self.rect.center[1]<0:\n self.rect = self.rect.move(0,-1*-self.speed)\n if ship.rect.center[1]-self.rect.center[1]>0:\n self.rect = self.rect.move(0,1*-self.speed)\n\n self.rect = self.rect.move([self.speed,0])\n screen.blit(self.image, self.rect)\n\n\n\nclass Enemy3(pygame.sprite.Sprite):\n def __init__(self, pos):\n pygame.sprite.Sprite.__init__(self)\n \n self.image = pygame.image.load(\"enemy3.gif\")\n \n ####\n self.image.set_colorkey([255,255,255])\n ####\n \n self.rect = self.image.get_rect()\n\n self.rect = self.rect.move(pos)\n\n self.speed = -6\n\n def update(self, screen, time, ship):\n if ship != None:\n if time%2:\n if ship.rect.center[1]-self.rect.center[1]<0:\n self.rect = self.rect.move(0,-1*-self.speed)\n if ship.rect.center[1]-self.rect.center[1]>0:\n self.rect = self.rect.move(0,1*-self.speed)\n\n self.rect = self.rect.move([self.speed,0])\n screen.blit(self.image, self.rect)\n\nclass Partical(pygame.sprite.Sprite):\n def __init__(self, pos, life):\n self.image = pygame.Surface([3,3])\n self.rect = self.image.get_rect()\n self.rect = self.rect.move(pos)\n\n self.life = life\n self.maxlife = life\n\n self.v_pos = [float(self.rect.center[0]),float(self.rect.center[1])]\n\n self.direction = [random.randint(1.0,50.0)*(random.randint(0,1)*2-1),random.randint(1.0,50.0)*(random.randint(0,1)*2-1)]\n self.magnitude = math.sqrt( self.direction[0]**2 + self.direction[1]**2 )\n self.speed = random.randint(1,100)/33.333\n\n self.direction[0] = self.direction[0]/self.magnitude*self.speed\n self.direction[1] = self.direction[1]/self.magnitude*self.speed\n def update(self, screen):\n self.life-=1\n self.dim = int(255*(float(self.life)/self.maxlife))\n \n self.v_pos = [self.v_pos[0] + self.direction[0], self.v_pos[1] + self.direction[1]]\n\n self.rect = self.rect.move(int(self.v_pos[0])-self.rect.center[0],int(self.v_pos[1])-self.rect.center[1])\n \n self.color = random.randint(0,3)\n if self.color==0:\n self.image.fill([self.dim,self.dim,self.dim])\n elif self.color==1:\n self.image.fill([self.dim,0,0])\n elif self.color==2:\n self.image.fill([0,self.dim,0])\n else:\n self.image.fill([0,0,self.dim])\n\n screen.blit(self.image, self.rect)\n\n\n\nclass Explosion(pygame.sprite.Sprite):\n def __init__(self, pos):\n pygame.sprite.Sprite.__init__(self)\n\n self.life=100\n\n self.particals = []\n for a in range(50):\n self.particals.append(Partical(pos, self.life))\n\n\n def update(self, screen):\n\n for partical in self.particals:\n partical.update(screen)\n \n self.life-=1\n\nclass Star(pygame.sprite.Sprite):\n def __init__(self,size):\n pygame.sprite.Sprite.__init__(self)\n \n self.image=pygame.Surface([1,1])\n \n self.rect = self.image.get_rect()\n self.rect = self.rect.move([random.randint(0,size[0]),random.randint(0,size[1])])\n\n self.size = size\n\n self.move = random.randint(5,25)\n self.current = self.move\n\n\n def update(self, screen):\n self.current-=1\n if self.current==0:\n self.current = self.move\n self.rect = self.rect.move(-1,0)\n \n self.brightness = random.randint(100-self.move*4,255-self.move*4)\n self.image.fill([self.brightness,self.brightness,self.brightness])\n \n \n if self.rect.right<0:\n self.rect = self.rect.move(self.size[0],0)\n\n screen.blit(self.image, self.rect)\n"
}
] | 3 |
FionaZZ92/OpenVINO_sample
|
https://github.com/FionaZZ92/OpenVINO_sample
|
379680b6f18de5f87639b28639d6113e0d9d4da6
|
65098583551553607b542e06916365e0de3d7d35
|
b2c468782c1235c190d53c3d829f3a7b24c8581b
|
refs/heads/master
| 2023-08-15T00:05:44.888773 | 2023-08-10T11:45:14 | 2023-08-10T11:45:14 | 155,801,045 | 4 | 2 | null | null | null | null | null |
[
{
"alpha_fraction": 0.7461335062980652,
"alphanum_fraction": 0.7655009031295776,
"avg_line_length": 54.643409729003906,
"blob_id": "7f728e569c3685b24368180bdfc47f55fdc5a155",
"content_id": "d5ecdc4ee787a03ce0a8321890b566060b66b2af",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Markdown",
"length_bytes": 7177,
"license_type": "no_license",
"max_line_length": 455,
"num_lines": 129,
"path": "/SD_controlnet/README.md",
"repo_name": "FionaZZ92/OpenVINO_sample",
"src_encoding": "UTF-8",
"text": "# ControlNet-canny benchmark with Stable Diffusion\n\n## Step 1: Prepare env and download model\n```shell\n$ mkdir ControlNet && cd ControlNet\n$ wget https://huggingface.co/lllyasviel/ControlNet/resolve/main/annotator/ckpts/body_pose_model.pth\n\n$ conda create -n SD python==3.10\n$ conda activate SD\n\n$ pip install opencv-contrib-python\n$ pip install -q \"diffusers>=0.14.0\" \"git+https://github.com/huggingface/accelerate.git\" controlnet-aux gradio\n$ pip install openvino openvino-dev onnx\n$ pip install torch==1.13.1 #important, must use version<2.0\n\n$ git lfs install\n$ git clone https://huggingface.co/lllyasviel/sd-controlnet-canny \n$ git clone https://huggingface.co/runwayml/stable-diffusion-v1-5\n$ git clone https://huggingface.co/openai/clip-vit-large-patch14 \n\n$ wget https://huggingface.co/takuma104/controlnet_dev/blob/main/gen_compare/control_images/vermeer_512x512.png \n```\n\n## Step 2: Convert Model to IR\nIn this case, we generate static model with batch_size=2:\n```shell\n$ python get_model.py -b 2 -sd stable-diffusion-v1-5/\n```\nPlease check your current path, make sure you already generate below models currently. Other files can be deleted for saving space.\n+ controlnet-canny.<xml|bin>\n+ text_encoder.<xml|bin>\n+ unet_controlnet.<xml|bin>\n+ vae_decoder.<xml|bin>\n\n## Step 3: Run test\n```shell\n$ python run_pipe.py\n```\nThe E2E inference time with 2 prompts(bs=2) on Arc 770 by OV 2023.0.1 is like below:\n```shell\n...\nInference time(20 its): 6.6 s\n```\n\nNow, use below source image to generate image with similar canny.\n\n\n\n## Step 4: Enable LoRA weights for Stable Diffusion + ControlNet pipeline (Choose one of below 3 methods)\n\n## Step 4-1: Enable lora by pytorch_lora_weights.bin\nThis step introduce the method to add lora weights to Stable diffusion Unet model by `pipe.unet.load_attn_procs(...)` function. You can visit https://civitai.com/tag/lora to get lora model. Let's use one lora weights on huggingface as an example:\n```shell\n$ git clone https://huggingface.co/TheUpperCaseGuy/finetune-lora-stable-diffusion\n$ rm unet_controlnet.* unet_controlnet/unet_controlnet.onnx\n$ python get_model.py -b 2 -sd stable-diffusion-v1-5/ -lt bin -lw finetune-lora-stable-diffusion/\n```\nThen, run pipeline inference program to check results.\n```shell\n$ python run_pipe.py\n```\nThe lora weights appended SD model with controlnet pipeline can generate image like below:\n\n\n\n## Step 4-2: Enable lora by safetensors typed weights\nThis step introduce the method to add lora weights to Stable diffusion Unet model by `diffusers/scripts/convert_lora_safetensor_to_diffusers.py`.\n```shell\n$ git clone https://huggingface.co/ntc-ai/fluffy-stable-diffusion-1.5-lora-trained-without-data\n$ git clone https://github.com/huggingface/diffusers.git && cd diffusers\n$ python scripts/convert_lora_safetensor_to_diffusers.py --base_model_path ../stable-diffusion-v1-5/ --checkpoint_path ../fluffy-stable-diffusion-1.5-lora-trained-without-data/fluffySingleWordConcept_v10.safetensors --dump_path ../stable-diffusion-v1-5-fluffy-lora --alpha=1.5\n$ cd .. && rm unet_controlnet.* unet_controlnet/unet_controlnet.onnx text_encoder.*\n$ python get_model.py -b 2 -sd stable-diffusion-v1-5-fluffy-lora/ -lt safetensors\n```\nThen, run pipeline inference program to check results.\n```shell\n$ python run_pipe.py\n```\nThe lora weights appended SD model with controlnet pipeline can generate image like below:\n\n\n\n## Step 4-3: Enable runtime lora merging by MatcherPass\nThis step introduces the method to add lora weights in runtime before unet model compiling. This method is to extract lora weights in safetensors file and find the corresponding weights in unet model and insert weights bias. The common method to add lora weights is:\n `W = W0 + W_bias(alpha * torch.mm(lora_up, lora_down))`.\n\nI intend to insert openvino `opset10.add(W0,W_bias)`. The original attention weights in Unet model is loaded by `Const` op, the common processing path is `Const`->`Convert`->`Matmul`->`...`, if we add the lora weights, we should insert the calculated lora weight bias as `Const`->`Convert`->`Add`->`Matmul`->`...`. In this function, we adopt openvino.runtime.passes.MathcerPass to insert `opset10.add` function.\n\nPlease make sure your current unet and text_encoder model is generated from original Stable Diffusion, if you continued from Step 4-2, please do below operations firstly. If you continued from Step 3, you can skip re-generating Unet and text-encoder:\n```shell\nrm unet_controlnet.* unet_controlnet/unet_controlnet.onnx text_encoder.*\npython get_model.py -b 2 -sd stable-diffusion-v1-5/\n``` \nRuntime add LoRA weights on original Stable Diffusion with ControlNet pipeline just with 1 step:\n```shell\npython run_pipe.py -lp fluffy-stable-diffusion-1.5-lora-trained-without-data/fluffySingleWordConcept_v10.safetensors -a 1.5\n```\nThe lora weights appended SD model with controlnet pipeline can generate image like below:\n\n\n\n## Step 4-4: Enable multiple lora \nThere are many methods to add multiple lora weights. I list two methods here. Assume you have two LoRA weigths, LoRA A and LoRA B. You can simply follow the Step 4-3 to loop the `MatcherPass` function to insert between original Unet `Convert` layer and `Add` layer of LoRA A. It's easy to implement. However, it is not good at performance.\n\n\n\nPlease consider about the Logic of `MatcherPass` function. This fucntion required to filter out all layer with the `Convert` type, then through the condition judgement if each `Convert` layer connected by weights `Constant` has been fine-tuned and updated in LoRA weights file. The main costs of LoRA enabling is costed by `InsertLoRA()` function, thus the main idea is to just invoke `InsertLoRA()` function once, but append multiple LoRA files' weights.\n\n\n\nBy above method to add multiple LoRA, the cost of appending 2 or more LoRA weights almost same as adding 1 LoRA weigths. Now, let's change the Stable Diffusion with https://huggingface.co/dreamlike-art/dreamlike-anime-1.0 to generate image with styles of animation. I pick two LoRA weights for SD 1.5 from https://civitai.com/tag/lora.\n\n+ soulcard: https://civitai.com/models/67927?modelVersionId=72591\n+ epi_noiseoffset: https://civitai.com/models/13941/epinoiseoffset\n\nYou probably need to do prompt engineering work to generate a useful prompt like below:\n\n+ prompt: \"1girl, cute, beautiful face, portrait, cloudy mountain, outdoors, trees, rock, river, (soul card:1.2), highly intricate details, realistic light, trending on cgsociety,neon details, ultra realistic details, global illumination, shadows, octane render, 8k, ultra sharp\"\n+ Negative prompt: \"3d, cartoon, lowres, bad anatomy, bad hands, text, error\"\n+ Seed: 0\n+ num_steps: 30\n+ canny low_threshold: 100\n\n```shell\n$ python run_pipe.py -lp soulcard.safetensors -a 0.7 -lp2 epi_noiseoffset2.safetensors -a2 0.7\n```\nYou can get a wonderful image which generate an animated girl with soulcard typical border like below:\n\n"
},
{
"alpha_fraction": 0.6509250998497009,
"alphanum_fraction": 0.6577823758125305,
"avg_line_length": 41.2349739074707,
"blob_id": "da4c92cf7795a1d2e1d96b8b42ee9950617eb2bc",
"content_id": "61515114726982e6a9b48dc89e5403da5d20ad33",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 7729,
"license_type": "no_license",
"max_line_length": 131,
"num_lines": 183,
"path": "/SD_controlnet/get_model.py",
"repo_name": "FionaZZ92/OpenVINO_sample",
"src_encoding": "UTF-8",
"text": "from pathlib import Path\nimport torch\nimport argparse\nfrom torch.onnx import _export as torch_onnx_export\nfrom openvino.tools.mo import convert_model\nfrom openvino.runtime import serialize\nfrom diffusers import StableDiffusionControlNetPipeline, ControlNetModel\n\ndef parse_args() -> argparse.Namespace:\n \"\"\"Parse and return command line arguments.\"\"\"\n parser = argparse.ArgumentParser(add_help=False)\n args = parser.add_argument_group('Options')\n # fmt: off\n args.add_argument('-h', '--help', action = 'help',\n help='Show this help message and exit.')\n args.add_argument('-b', '--batch', type = int, default = 1, required = True,\n help='Required. batch_size for solving single/multiple prompt->image generation.')\n args.add_argument('-sd','--sd_weights', type = str, default=\"\", required = True,\n help='Specify the path of stable diffusion model')\n args.add_argument('-lt','--lora_type', type = str, default=\"\", required = False,\n help='Specify the type of lora weights, you can choose \"safetensors\" or \"bin\"')\n args.add_argument('-lw', '--lora_weights', type = str, default=\"\", required = False,\n help='Add lora weights to Stable diffusion.')\n # fmt: on\n return parser.parse_args()\n\nargs = parse_args()\n###covnert controlnet to IR\ncontrolnet = ControlNetModel.from_pretrained(\"sd-controlnet-canny\", torch_dtype=torch.float32)\ninputs = {\n \"sample\": torch.randn((args.batch*2, 4, 64, 64)), \n \"timestep\": torch.tensor(1),\n \"encoder_hidden_states\": torch.randn((args.batch*2,77,768)),\n \"controlnet_cond\": torch.randn((args.batch*2,3,512,512)) #batch=2\n}\n'''dynamic_names = {\n \"sample\": {0: \"batch\"},\n \"encoder_hidden_states\": {0: \"batch\", 1: \"sequence\"},\n \"controlnet_cond\": {0: \"batch\"},\n}'''\n\nCONTROLNET_ONNX_PATH = Path('controlnet-canny.onnx')\nCONTROLNET_OV_PATH = CONTROLNET_ONNX_PATH.with_suffix('.xml')\ncontrolnet.eval()\nwith torch.no_grad():\n down_block_res_samples, mid_block_res_sample = controlnet(**inputs, return_dict=False)\n\ncontrolnet_output_names = [f\"down_block_res_sample_{i}\" for i in range(len(down_block_res_samples))]\ncontrolnet_output_names.append(\"mid_block_res_sample\")\n\nif not CONTROLNET_OV_PATH.exists():\n if not CONTROLNET_ONNX_PATH.exists():\n\n with torch.no_grad():\n torch_onnx_export(controlnet, inputs, CONTROLNET_ONNX_PATH, input_names=list(inputs),\n output_names=controlnet_output_names,onnx_shape_inference=False, #dynamic_axes=dynamic_names,\n operator_export_type=torch.onnx.OperatorExportTypes.ONNX_ATEN_FALLBACK)\n\n ov_ctrlnet = convert_model(CONTROLNET_ONNX_PATH, compress_to_fp16=True)\n serialize(ov_ctrlnet,CONTROLNET_OV_PATH)\n del ov_ctrlnet\n print('ControlNet successfully converted to IR')\nelse:\n print(f\"ControlNet will be loaded from {CONTROLNET_OV_PATH}\")\n\n\n###convert SD-Unet model to IR\npipe = StableDiffusionControlNetPipeline.from_pretrained(args.sd_weights, controlnet=controlnet)\nif args.lora_type == \"bin\":\n pipe.unet.load_attn_procs(args.lora_weights)\nelif args.lora_type == \"safetensors\":\n print(\"==make sure you already generate new SD model with lora by diffusers.scripts.convert_lora_safetensor_to_diffusers.py==\")\nelse:\n print(\"==No lora==\")\nUNET_ONNX_PATH = Path('unet_controlnet/unet_controlnet.onnx')\nUNET_OV_PATH = UNET_ONNX_PATH.parents[1] / 'unet_controlnet.xml'\n\nif not UNET_OV_PATH.exists():\n if not UNET_ONNX_PATH.exists():\n UNET_ONNX_PATH.parent.mkdir(exist_ok=True)\n inputs.pop(\"controlnet_cond\", None)\n inputs[\"down_block_additional_residuals\"] = down_block_res_samples\n inputs[\"mid_block_additional_residual\"] = mid_block_res_sample\n\n unet = pipe.unet\n unet.eval()\n\n input_names = [\"sample\", \"timestep\", \"encoder_hidden_states\", *controlnet_output_names]\n '''dynamic_names = {\n \"sample\": {0: \"batch\"},\n \"encoder_hidden_states\": {0: \"batch\", 1: \"sequence\"},\n \"controlnet_cond\": {0: \"batch\"},\n }'''\n\n with torch.no_grad():\n torch_onnx_export(unet, inputs, str(UNET_ONNX_PATH), #dynamic_axes=dynamic_names,\n input_names=input_names, output_names=[\"sample_out\"], onnx_shape_inference=False, opset_version=15)\n del unet\n del pipe.unet\n ov_unet = convert_model(UNET_ONNX_PATH, compress_to_fp16=True)\n serialize(ov_unet,UNET_OV_PATH)\n del ov_unet\n print('Unet successfully converted to IR')\nelse:\n del pipe.unet\n print(f\"Unet will be loaded from {UNET_OV_PATH}\")\n\n###convert SD-text_encoder model to IR\nTEXT_ENCODER_ONNX_PATH = Path('text_encoder.onnx')\nTEXT_ENCODER_OV_PATH = TEXT_ENCODER_ONNX_PATH.with_suffix('.xml')\n\ndef convert_encoder_onnx(text_encoder:torch.nn.Module, onnx_path:Path):\n if not onnx_path.exists():\n input_ids = torch.ones((args.batch, 77), dtype=torch.long)\n # switch model to inference mode\n text_encoder.eval()\n\n # disable gradients calculation for reducing memory consumption\n with torch.no_grad():\n # infer model, just to make sure that it works\n text_encoder(input_ids)\n # export model to ONNX format\n torch_onnx_export(\n text_encoder, # model instance\n input_ids, # inputs for model tracing\n onnx_path, # output file for saving result\n input_names=['tokens'], # model input name for onnx representation\n output_names=['last_hidden_state', 'pooler_out'], # model output names for onnx representation\n opset_version=14, # onnx opset version for export\n onnx_shape_inference=False\n )\n print('Text Encoder successfully converted to ONNX')\n\nif not TEXT_ENCODER_OV_PATH.exists():\n convert_encoder_onnx(pipe.text_encoder, TEXT_ENCODER_ONNX_PATH)\n ov_txten = convert_model(TEXT_ENCODER_ONNX_PATH, compress_to_fp16=True)\n serialize(ov_txten,TEXT_ENCODER_OV_PATH)\n print('Text Encoder successfully converted to IR')\nelse:\n print(f\"Text encoder will be loaded from {TEXT_ENCODER_OV_PATH}\")\n\n\n###convert VAE model to IR\nVAE_DECODER_ONNX_PATH = Path('vae_decoder.onnx')\nVAE_DECODER_OV_PATH = VAE_DECODER_ONNX_PATH.with_suffix('.xml')\n\ndef convert_vae_decoder_onnx(vae: torch.nn.Module, onnx_path: Path):\n \"\"\"\n Convert VAE model to ONNX, then IR format. \n Function accepts pipeline, creates wrapper class for export only necessary for inference part, \n prepares example inputs for ONNX conversion via torch.export, \n Parameters: \n vae (torch.nn.Module): VAE model\n onnx_path (Path): File for storing onnx model\n Returns:\n None\n \"\"\"\n class VAEDecoderWrapper(torch.nn.Module):\n def __init__(self, vae):\n super().__init__()\n self.vae = vae\n\n def forward(self, latents):\n return self.vae.decode(latents)\n\n if not onnx_path.exists():\n vae_decoder = VAEDecoderWrapper(vae)\n latents = torch.zeros((args.batch, 4, 64, 64))\n\n vae_decoder.eval()\n with torch.no_grad():\n torch.onnx.export(vae_decoder, latents, onnx_path, input_names=[\n 'latents'], output_names=['sample'])\n print('VAE decoder successfully converted to ONNX')\n\n\nif not VAE_DECODER_OV_PATH.exists():\n convert_vae_decoder_onnx(pipe.vae, VAE_DECODER_ONNX_PATH)\n ov_vae = convert_model(VAE_DECODER_ONNX_PATH, compress_to_fp16=True)\n serialize(ov_vae,VAE_DECODER_OV_PATH)\n print('VAE decoder successfully converted to IR')\nelse:\n print(f\"VAE decoder will be loaded from {VAE_DECODER_OV_PATH}\")\n"
},
{
"alpha_fraction": 0.5902284979820251,
"alphanum_fraction": 0.6015556454658508,
"avg_line_length": 45.12107467651367,
"blob_id": "4acdb75b32bc46ba4f22ecb8108370c896d3425c",
"content_id": "5505abde771852711edbeb4cf7990421679774a9",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 20570,
"license_type": "no_license",
"max_line_length": 307,
"num_lines": 446,
"path": "/SD_controlnet/run_pipe.py",
"repo_name": "FionaZZ92/OpenVINO_sample",
"src_encoding": "UTF-8",
"text": "from PIL import Image\nfrom diffusers import UniPCMultistepScheduler, EulerAncestralDiscreteScheduler, StableDiffusionControlNetPipeline, ControlNetModel\nimport torch\nimport numpy as np\nimport argparse\nfrom typing import Union, List, Optional, Tuple\nfrom diffusers.utils import load_image\nfrom diffusers.pipeline_utils import DiffusionPipeline\nfrom transformers import CLIPTokenizer\nfrom openvino.runtime import Core, Model, Type\nfrom openvino.runtime.passes import Manager, GraphRewrite, MatcherPass, WrapType, Matcher\nfrom openvino.runtime import opset10 as ops\nfrom safetensors.torch import load_file\nimport time\nimport cv2\n\ndef parse_args() -> argparse.Namespace:\n \"\"\"Parse and return command line arguments.\"\"\"\n parser = argparse.ArgumentParser(add_help=False)\n args = parser.add_argument_group('Options')\n # fmt: off\n args.add_argument('-h', '--help', action = 'help',\n help='Show this help message and exit.')\n args.add_argument('-lp', '--lora_path', type = str, default = \"\", required = False,\n help='Specify path of lora weights *.safetensors')\n args.add_argument('-a','--alpha',type = float, default = 0.75, required = False,\n help='Specify the merging ratio of lora weights, default is 0.75.')\n args.add_argument('-lp2', '--lora_path2', type = str, default = \"\", required = False,\n help='Specify path of lora weights *.safetensors')\n args.add_argument('-a2','--alpha2',type = float, default = 0.75, required = False,\n help='Specify the merging ratio of lora weights, default is 0.75.') \n args.add_argument('-c','--cache',type = bool, default = False, required = False,\n help='Use model cache if run on GPU device, please make sure you upgrade OV to 2023.1')\n return parser.parse_args()\n\ndef scale_fit_to_window(dst_width:int, dst_height:int, image_width:int, image_height:int):\n im_scale = min(dst_height / image_height, dst_width / image_width)\n return int(im_scale * image_width), int(im_scale * image_height)\n\ndef preprocess(image: Image.Image):\n src_width, src_height = image.size\n dst_width, dst_height = scale_fit_to_window(512, 512, src_width, src_height)\n image = np.array(image.resize((dst_width, dst_height), resample=Image.Resampling.LANCZOS))[None, :]\n pad_width = 512 - dst_width\n pad_height = 512 - dst_height\n pad = ((0, 0), (0, pad_height), (0, pad_width), (0, 0))\n image = np.pad(image, pad, mode=\"constant\")\n #image = np.squeeze(image)\n #image = cv2.copyMakeBorder(image, int(pad_height//2), 512-int(pad_height//2)-dst_height, int(pad_width//2), 512-int(pad_width//2)-dst_width, cv2.BORDER_CONSTANT, (0,0,0) );\n #cv2.imwrite(\"preprocess.png\",image)\n #image = np.expand_dims(image, axis=0)\n image = image.astype(np.float32) / 255.0\n image = image.transpose(0, 3, 1, 2)\n return image, pad\n\n\ndef randn_tensor(\n shape: Union[Tuple, List],\n dtype: Optional[np.dtype] = np.float32,\n):\n latents = np.random.randn(*shape).astype(dtype)\n\n return latents\n\nclass InsertLoRA(MatcherPass):\n def __init__(self,lora_dict_list):\n MatcherPass.__init__(self)\n self.model_changed = False\n\n param = WrapType(\"opset10.Convert\")\n\n def callback(matcher: Matcher) -> bool:\n root = matcher.get_match_root()\n root_output = matcher.get_match_value()\n for y in lora_dict_list:\n if root.get_friendly_name().replace('.','_').replace('_weight','') == y[\"name\"]:\n consumers = root_output.get_target_inputs()\n lora_weights = ops.constant(y[\"value\"],Type.f32,name=y[\"name\"])\n add_lora = ops.add(root,lora_weights,auto_broadcast='numpy')\n for consumer in consumers:\n consumer.replace_source_output(add_lora.output(0))\n\n # For testing purpose\n self.model_changed = True\n # Use new operation for additional matching\n self.register_new_node(add_lora)\n\n # Root node wasn't replaced or changed\n return False\n\n self.register_matcher(Matcher(param,\"InsertLoRA\"), callback)\n\n\nclass OVContrlNetStableDiffusionPipeline(DiffusionPipeline):\n \"\"\"\n OpenVINO inference pipeline for Stable Diffusion with ControlNet guidence\n \"\"\"\n def __init__(\n self,\n tokenizer: CLIPTokenizer,\n scheduler,\n core: Core,\n controlnet: Model,\n text_encoder: Model,\n unet: Model,\n vae_decoder: Model,\n state_dict,\n alpha_list,\n device:str = \"AUTO\"\n ):\n super().__init__()\n self.tokenizer = tokenizer\n self.vae_scale_factor = 8 #2 ** (len(self.vae.config.block_out_channels) - 1)\n self.scheduler = scheduler\n self.load_models(core, device, controlnet, text_encoder, unet, vae_decoder, state_dict, alpha_list)\n self.set_progress_bar_config(disable=True)\n \n\n def load_models(self, core: Core, device: str, controlnet:Model, text_encoder: Model, unet: Model, vae_decoder: Model, state_dict, alpha_list):\n if state_dict != None:\n ov_unet = core.read_model(unet)\n ov_text_encoder = core.read_model(text_encoder)\n ##===Add lora weights===\n visited = []\n lora_dict = {}\n lora_dict_list = []\n LORA_PREFIX_UNET = \"lora_unet\"\n LORA_PREFIX_TEXT_ENCODER = \"lora_te\"\n flag = 0\n manager = Manager()\n for iter in range(len(state_dict)):\n visited = []\n for key in state_dict[iter]:\n if \".alpha\" in key or key in visited:\n continue\n if \"text\" in key:\n layer_infos = key.split(LORA_PREFIX_TEXT_ENCODER + \"_\")[-1].split(\".\")[0]\n lora_dict = dict(name=layer_infos)\n lora_dict.update(type=\"text_encoder\")\n else:\n layer_infos = key.split(LORA_PREFIX_UNET + \"_\")[1].split('.')[0]\n lora_dict = dict(name=layer_infos)\n lora_dict.update(type=\"unet\")\n pair_keys = []\n if \"lora_down\" in key:\n pair_keys.append(key.replace(\"lora_down\", \"lora_up\"))\n pair_keys.append(key)\n else:\n pair_keys.append(key)\n pair_keys.append(key.replace(\"lora_up\", \"lora_down\"))\n\n # update weight\n if len(state_dict[iter][pair_keys[0]].shape) == 4:\n weight_up = state_dict[iter][pair_keys[0]].squeeze(3).squeeze(2).to(torch.float32)\n weight_down = state_dict[iter][pair_keys[1]].squeeze(3).squeeze(2).to(torch.float32)\n lora_weights = alpha_list[iter] * torch.mm(weight_up, weight_down).unsqueeze(2).unsqueeze(3)\n lora_dict.update(value=lora_weights)\n else:\n weight_up = state_dict[iter][pair_keys[0]].to(torch.float32)\n weight_down = state_dict[iter][pair_keys[1]].to(torch.float32)\n lora_weights = alpha_list[iter] * torch.mm(weight_up, weight_down)\n lora_dict.update(value=lora_weights)\n #check if this layer has been appended in lora_dict_list\n for ll in lora_dict_list:\n if ll[\"name\"] == lora_dict[\"name\"]:\n ll[\"value\"] += lora_dict[\"value\"] # all lora weights added together\n flag = 1\n if flag == 0:\n lora_dict_list.append(lora_dict)\n # update visited list\n for item in pair_keys:\n visited.append(item)\n flag = 0\n manager.register_pass(InsertLoRA(lora_dict_list))\n if (True in [('type','text_encoder') in l.items() for l in lora_dict_list]):\n manager.run_passes(ov_text_encoder)\n self.text_encoder = core.compile_model(ov_text_encoder, device)\n manager.run_passes(ov_unet)\n self.unet = core.compile_model(ov_unet, device)\n else:\n self.text_encoder = core.compile_model(text_encoder, device)\n self.unet = core.compile_model(unet, device)\n\n self.text_encoder_out = self.text_encoder.output(0)\n self.controlnet = core.compile_model(controlnet, device)\n self.unet_out = self.unet.output(0)\n self.vae_decoder = core.compile_model(vae_decoder, device)\n self.vae_decoder_out = self.vae_decoder.output(0)\n def prepare_image(self, image: Image, batch_size: int, num_images_per_prompt:int = 1, do_classifier_free_guidance: bool = True):\n orig_width, orig_height = image.size\n image, pad = preprocess(image)\n height, width = image.shape[-2:]\n if image.shape[0] == 1:\n repeat_by = batch_size\n else:\n repeat_by = num_images_per_prompt\n image = image.repeat(repeat_by, axis=0)\n if do_classifier_free_guidance:\n image = np.concatenate(([image] * 2))\n return image, height, width, pad, orig_height, orig_width\n\n\n def __call__(\n self,\n prompt: Union[str, List[str]],\n image: Image.Image,\n num_inference_steps: int = 10,\n negative_prompt: Union[str, List[str]] = None,\n guidance_scale: float = 7.5,\n controlnet_conditioning_scale: Union[float, List[float]] = 1.0,\n control_guidance_start: Union[float, List[float]] = [0.0], #single controlnet\n control_guidance_end: Union[float, List[float]] = [1.0], #single controlnet\n eta: float = 0.0,\n latents: Optional[np.array] = None,\n output_type: Optional[str] = \"pil\",\n ):\n\n # 1. Define call parameters\n batch_size = 1 if isinstance(prompt, str) else len(prompt)\n\n do_classifier_free_guidance = guidance_scale > 1.0\n # 2. Encode input prompt\n text_embeddings = self._encode_prompt(prompt, negative_prompt=negative_prompt)\n\n # 3. Preprocess image\n image, height, width, pad, orig_height, orig_width = self.prepare_image(image, batch_size, do_classifier_free_guidance)\n\n # 4. set timesteps\n self.scheduler.set_timesteps(num_inference_steps)\n timesteps = self.scheduler.timesteps\n\n # 6. Prepare latent variables\n num_channels_latents = 4\n latents = self.prepare_latents(\n batch_size,\n num_channels_latents,\n height,\n width,\n text_embeddings.dtype,\n latents,\n )\n\n # 6.1 Create tensor stating which controlnets to keep\n controlnet_keep = []\n for i in range(len(timesteps)):\n keeps = [\n 1.0 - float(i / len(timesteps) < s or (i + 1) / len(timesteps) > e)\n for s, e in zip(control_guidance_start, control_guidance_end)\n ]\n controlnet_keep.append(keeps[0]) #keeps[0] if isinstance(controlnet, ControlNetModel) else keeps)\n\n # 7. Denoising loop\n num_warmup_steps = len(timesteps) - num_inference_steps * self.scheduler.order\n with self.progress_bar(total=num_inference_steps) as progress_bar:\n for i, t in enumerate(timesteps):\n # Expand the latents if we are doing classifier free guidance.controlnet_pip\n # The latents are expanded 3 times because for pix2pix the guidance\\\n # is applied for both the text and the input image.\n latent_model_input = np.concatenate(\n [latents] * 2) if do_classifier_free_guidance else latents\n latent_model_input = self.scheduler.scale_model_input(latent_model_input, t)\n #text_embeddings = np.split(text_embeddings, 2)[1] if do_classifier_free_guidance else text_embeddings\n\n if isinstance(controlnet_keep[i], list):\n cond_scale = [c * s for c, s in zip(controlnet_conditioning_scale, controlnet_keep[i])]\n else:\n cond_scale = controlnet_conditioning_scale * controlnet_keep[i]\n \n result = self.controlnet([latent_model_input, t, text_embeddings, image, cond_scale])\n down_and_mid_blok_samples = [sample * cond_scale for _, sample in result.items()]\n\n # predict the noise residual\n noise_pred = self.unet([latent_model_input, t, text_embeddings, *down_and_mid_blok_samples])[self.unet_out]\n\n # perform guidance\n if do_classifier_free_guidance:\n noise_pred_uncond, noise_pred_text = np.split(noise_pred,2) #noise_pred[0], noise_pred[1]\n noise_pred = noise_pred_uncond + guidance_scale * (noise_pred_text - noise_pred_uncond)\n\n # compute the previous noisy sample x_t -> x_t-1\n latents = self.scheduler.step(torch.from_numpy(noise_pred), t, torch.from_numpy(latents)).prev_sample.numpy()\n\n # update progress\n if i == len(timesteps) - 1 or ((i + 1) > num_warmup_steps and (i + 1) % self.scheduler.order == 0):\n progress_bar.update()\n\n # 8. Post-processing\n image = self.decode_latents(latents, pad)\n\n # 9. Convert to PIL\n if output_type == \"pil\":\n image = self.numpy_to_pil(image)\n image = [img.resize((orig_width, orig_height), Image.Resampling.LANCZOS) for img in image]\n else:\n image = [cv2.resize(img, (orig_width, orig_width))\n for img in image]\n\n return image\n\n def _encode_prompt(self, prompt:Union[str, List[str]], num_images_per_prompt:int = 1, do_classifier_free_guidance:bool = True, negative_prompt:Union[str, List[str]] = None):\n batch_size = len(prompt) if isinstance(prompt, list) else 1\n\n # tokenize input prompts\n text_inputs = self.tokenizer(\n prompt,\n padding=\"max_length\",\n max_length=self.tokenizer.model_max_length,\n truncation=True,\n return_tensors=\"np\",\n )\n text_input_ids = text_inputs.input_ids\n\n text_embeddings = self.text_encoder(\n text_input_ids)[self.text_encoder_out]\n\n # duplicate text embeddings for each generation per prompt\n if num_images_per_prompt != 1:\n bs_embed, seq_len, _ = text_embeddings.shape\n text_embeddings = np.tile(\n text_embeddings, (1, num_images_per_prompt, 1))\n text_embeddings = np.reshape(\n text_embeddings, (bs_embed * num_images_per_prompt, seq_len, -1))\n\n # get unconditional embeddings for classifier free guidance\n if do_classifier_free_guidance:\n uncond_tokens: List[str]\n max_length = text_input_ids.shape[-1]\n if negative_prompt is None:\n uncond_tokens = [\"\"] * batch_size\n elif isinstance(negative_prompt, str):\n uncond_tokens = [negative_prompt]\n else:\n uncond_tokens = negative_prompt\n uncond_input = self.tokenizer(\n uncond_tokens,\n padding=\"max_length\",\n max_length=max_length,\n truncation=True,\n return_tensors=\"np\",\n )\n\n uncond_embeddings = self.text_encoder(uncond_input.input_ids)[self.text_encoder_out]\n\n # duplicate unconditional embeddings for each generation per prompt, using mps friendly method\n seq_len = uncond_embeddings.shape[1]\n uncond_embeddings = np.tile(uncond_embeddings, (1, num_images_per_prompt, 1))\n uncond_embeddings = np.reshape(uncond_embeddings, (batch_size * num_images_per_prompt, seq_len, -1))\n\n # For classifier free guidance, we need to do two forward passes.\n # Here we concatenate the unconditional and text embeddings into a single batch\n # to avoid doing two forward passes\n text_embeddings = np.concatenate([uncond_embeddings, text_embeddings])\n\n return text_embeddings\n\n def prepare_latents(self, batch_size:int, num_channels_latents:int, height:int, width:int, dtype:np.dtype = np.float32, latents:np.ndarray = None):\n\n shape = (batch_size, num_channels_latents, height // self.vae_scale_factor, width // self.vae_scale_factor)\n if latents is None:\n latents = randn_tensor(shape, dtype=dtype)\n else:\n latents = latents\n\n # scale the initial noise by the standard deviation required by the scheduler\n latents = latents * self.scheduler.init_noise_sigma\n return latents\n\n def decode_latents(self, latents:np.array, pad:Tuple[int]):\n\n latents = 1 / 0.18215 * latents # 1 / self.vae.config.scaling_factor * latents\n image = self.vae_decoder(latents)[self.vae_decoder_out]\n (_, end_h), (_, end_w) = pad[1:3]\n h, w = image.shape[2:]\n unpad_h = h - end_h\n unpad_w = w - end_w\n image = image[:, :, :unpad_h, :unpad_w]\n image = np.clip(image / 2 + 0.5, 0, 1)\n image = np.transpose(image, (0, 2, 3, 1))\n return image\n\nargs = parse_args()\ncontrolnet = ControlNetModel.from_pretrained(\"sd-controlnet-canny\", torch_dtype=torch.float32).cpu()\npipe = StableDiffusionControlNetPipeline.from_pretrained(\"stable-diffusion-v1-5\", controlnet=controlnet)\n\ntokenizer = CLIPTokenizer.from_pretrained('clip-vit-large-patch14')\nscheduler = UniPCMultistepScheduler.from_config(pipe.scheduler.config)\n#scheduler = EulerAncestralDiscreteScheduler.from_config(pipe.scheduler.config)\n\nCONTROLNET_OV_PATH = \"controlnet-canny.xml\"\nTEXT_ENCODER_OV_PATH = \"text_encoder.xml\"\nUNET_OV_PATH = \"unet_controlnet.xml\"\nVAE_DECODER_OV_PATH = \"vae_decoder.xml\"\n\ncore = Core()\nif args.cache == True:\n core.set_property({'CACHE_DIR': './cache'}) #enable SD model cache since OV2023.1\n#====Add lora======\nLORA_PATH = []\nLORA_ALPHA = []\nif args.lora_path != \"\":\n #lora_pair = dict(path=args.lora_path)\n #lora_pair.update(alpha=args.alpha)\n LORA_PATH.append(args.lora_path)\n LORA_ALPHA.append(args.alpha)\n if args.lora_path2 != \"\":\n #lora_pair = dict(path=args.lora_path2)\n #lora_pair.update(alpha=args.alpha2)\n LORA_PATH.append(args.lora_path2)\n LORA_ALPHA.append(args.alpha)\n\nstate_dict = []\n# load LoRA weight from .safetensors\nif len(LORA_PATH) == 0:\n ov_pipe = OVContrlNetStableDiffusionPipeline(tokenizer, scheduler, core, CONTROLNET_OV_PATH, TEXT_ENCODER_OV_PATH, UNET_OV_PATH, VAE_DECODER_OV_PATH, None, None, device=\"AUTO\") #change to CPU or GPU\nelse:\n [state_dict.append(load_file(p)) for p in LORA_PATH] #state_dict is list of lora list\n ov_pipe = OVContrlNetStableDiffusionPipeline(tokenizer, scheduler, core, CONTROLNET_OV_PATH, TEXT_ENCODER_OV_PATH, UNET_OV_PATH, VAE_DECODER_OV_PATH, state_dict, LORA_ALPHA, device=\"AUTO\") #change to CPU or GPU\n\nimage = load_image(\"vermeer_512x512.png\")\nimage = np.array(image)\n\nlow_threshold = 150\nhigh_threshold = 200\n\nimage = cv2.Canny(image, low_threshold, high_threshold)\nimage = image[:, :, None]\nimage = np.concatenate([image, image, image], axis=2)\nimage = Image.fromarray(image)\n#image.save(\"canny.png\")\n\nprompt = [\"Girl with Pearl Earring\",\"Girl with blue cloth\"]\n#prompt = [\"Girl with Pearl Earring\",\"1girl, cute, beautiful face, portrait, cloudy mountain, outdoors, trees, rock, river, (soul card:1.2), highly intricate details, realistic light, trending on cgsociety,neon details, ultra realistic details, global illumination, shadows, octane render, 8k, ultra sharp\"]\nnum_steps = 20\n\nnegative_prompt = [\"monochrome, lowres, bad anatomy, worst quality, low quality\",\"monochrome, lowres, bad anatomy, worst quality, low quality\"]\n#negative_prompt = [\"monochrome, lowres, bad anatomy, worst quality, low quality\",\"3d, cartoon, lowres, bad anatomy, bad hands, text, error\"]\n\n\nnp.random.seed(42)\nstart = time.time()\nresults = ov_pipe(prompt, image, num_steps, negative_prompt)\nend = time.time()-start\nprint(\"Inference time({}its): {} s\".format(num_steps,end))\n\nfor i in range(len(results)):\n results[i].save(\"result\"+str(i)+\".png\")\n"
}
] | 3 |
orix-software/orix-software.github.io
|
https://github.com/orix-software/orix-software.github.io
|
7a693196a2f47edccf99fe1c8544f127c40284f8
|
71a22efbb679d40f70b3a0a94124caf90dd9943c
|
9904c9e87ade00474e75fa526d371e3e6e2cab0a
|
refs/heads/master
| 2023-07-09T07:53:36.220689 | 2023-06-28T20:38:31 | 2023-06-28T20:38:31 | 154,180,005 | 4 | 0 | null | null | null | null | null |
[
{
"alpha_fraction": 0.6721311211585999,
"alphanum_fraction": 0.6721311211585999,
"avg_line_length": 15.590909004211426,
"blob_id": "e90f93a7027307204a48ecfd9ae6240c63b1ec78",
"content_id": "66cfb8cc02892e4dc149e8dd3f055e6468c343cf",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Markdown",
"length_bytes": 366,
"license_type": "no_license",
"max_line_length": 49,
"num_lines": 22,
"path": "/docs/commands/untar.md",
"repo_name": "orix-software/orix-software.github.io",
"src_encoding": "UTF-8",
"text": "# Command: untar\n\n### untar utility\n\n## SYNOPSYS\n+ untar -t[v]f tarfile\n+ untar -x[v]f tarfile\n\n## EXAMPLES\n+ untar -tf /home/test.tar\n\n## DESCRIPTION\n**untar** list and extract files in a TAR archive\n\n## OPTIONS\n* -h ^Bshow this help message and exit\n* -t ^Blist files\n* -x ^Bextract files\n* -v ^Bverbose mode\n\n## SOURCE\nhttps://github.com/orix-software/untar\n\n"
},
{
"alpha_fraction": 0.5365853905677795,
"alphanum_fraction": 0.6829268336296082,
"avg_line_length": 7,
"blob_id": "ca240edfd3302a884057c76a8e82b99c960d571b",
"content_id": "01f3ae152c4e16804013206d537848ee49af7271",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Markdown",
"length_bytes": 41,
"license_type": "no_license",
"max_line_length": 18,
"num_lines": 5,
"path": "/docs/commands/asm2k2.md",
"repo_name": "orix-software/orix-software.github.io",
"src_encoding": "UTF-8",
"text": "# asm2k2 \n\nlaunch asm2k2 demo\n\n/#asm2k2\n\n"
},
{
"alpha_fraction": 0.6084624528884888,
"alphanum_fraction": 0.6096543669700623,
"avg_line_length": 23.42424201965332,
"blob_id": "87dc8a469423e30fa83385c2f800fa7f99ca9d0d",
"content_id": "3895d9b27081f4337e59c20582e4d22f01f1cd29",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Markdown",
"length_bytes": 1678,
"license_type": "no_license",
"max_line_length": 72,
"num_lines": 66,
"path": "/docs/developer_manual/orixsdk_macros/overview.md",
"repo_name": "orix-software/orix-software.github.io",
"src_encoding": "UTF-8",
"text": "# Summary\r\n\r\n```markmap\r\n\r\n## Args from commandline\r\n\r\n* [initmainargs](../initmainargs) : get command line and build argv/argc\r\n* [getmainargs](../getmainargs) : get an argv from xmainargs struct\r\n\r\n## Numbers\r\n\r\n* [itoa](../itoa) : convert a number into decimal string\r\n* [tohex](../tohex) : convert a number to hexadecimal and printit\r\n* [atoi](../atoi) : Convert a string into a number (16 bits)\r\n\r\n## Memory\r\n\r\n* [malloc](../malloc) : allocate memory from main memory\r\n* [mfree](../mfree) : free memory\r\n* [memdecal](../memdecal) : move memory\r\n* [memmove](../memmove) : move memory\r\n\r\n## Files\r\n\r\n* [fopen](../fopen) : Open a file\r\n* [fwrite](../fwrite) : Write bytes to file\r\n* [fread](../fread) : read bytes from file\r\n* [fclose](../fclose) : Close file\r\n* [fseek](../fseek) : Seek into files\r\n* [mkdir](../mkdir) : Create a folder\r\n* [getcwd](../getcwd) : get current path\r\n* [chdir](../chdir) : change current path\r\n* [unlink](../unlink) : remove file\r\n\r\n## Text mode\r\n\r\n* [cputc](../cputc) : Display a char\r\n* [print](../print) : Display a string\r\n* [prints](../prints) : Display a string\r\n* [print_int](../print_int) : print an int\r\n* [crlf](../crlf) : Returns to the next line\r\n* [scroll](../scroll) : scroll from bottom to top, or top to bottom\r\n* [cursor](../cursor) : Switch on/off cursor\r\n\r\n## Graphic/text mode\r\n\r\n* [setscreen](../setscreen) : start graphic mode\r\n\r\n## Execute\r\n\r\n* [exec](../exec) : Execute binary\r\n\r\n## Keyboard\r\n\r\n* [cgetc](../cgetc) : Get a char from keyboard (wait for a key)\r\n\r\n## Sound\r\n\r\n* [ping](../ping) : ping sound\r\n* [zap](../zap) : zap sound\r\n* [explode](../explode) : explode sound\r\n* [shoot](../shoot) : shoot sound\r\n\r\n\r\n\r\n```\r\n"
},
{
"alpha_fraction": 0.56623375415802,
"alphanum_fraction": 0.5766233801841736,
"avg_line_length": 11.275861740112305,
"blob_id": "215fc65cb50665e14772f87fa89ab267f69045f4",
"content_id": "7cd23718e99f68c55809dcf39908d4d33681c4b2",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Markdown",
"length_bytes": 385,
"license_type": "no_license",
"max_line_length": 62,
"num_lines": 29,
"path": "/docs/developer_manual/orixsdk_macros/cputc.md",
"repo_name": "orix-software/orix-software.github.io",
"src_encoding": "UTF-8",
"text": "# cputc\r\n\r\n## Description\r\n\r\nDisplays a char\r\n\r\n## usage\r\n\r\n- cputc\r\n- cputc n\r\n- cputc 'c'\r\n\r\nnote:\r\n\r\n- no parameter: use the value of A register\r\n\r\nAlias for: [print](print) #'c'\r\n\r\n## example\r\n\r\n```ca65\r\n .include \"telestrat.inc\"\r\n .include \"../orix-sdk/macros/SDK_conio.mac\"\r\n\r\n cputc char\r\n rts\r\n```\r\n\r\nCall [XWR0](../../../kernel/primitives/XWR0/) kernel function.\r\n"
},
{
"alpha_fraction": 0.554959774017334,
"alphanum_fraction": 0.5710455775260925,
"avg_line_length": 10.862069129943848,
"blob_id": "3b4150b8ee5d611741b9a6ce09ec1b9e0c639225",
"content_id": "89da0817ff07f778d8d1f1b959894137eeb79a97",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Markdown",
"length_bytes": 373,
"license_type": "no_license",
"max_line_length": 66,
"num_lines": 29,
"path": "/docs/developer_manual/orixsdk_macros/itoa.md",
"repo_name": "orix-software/orix-software.github.io",
"src_encoding": "UTF-8",
"text": "# itoa macro\r\n\r\n## Description\r\n\r\nconvert a number into string\r\n\r\n## Usage\r\n\r\nitoa [ptr], [len], [char]\r\n\r\nptr may be: (ptr), address\r\n\r\n## Output\r\n\r\nN/A\r\n\r\n## Example\r\n\r\n```ca65\r\n .include \"telestrat.inc\"\r\n .include \"SDK_misc.mac\"\r\n\r\n itoa mynumber,2,2\r\n rts\r\nmynumber:\r\n .byte 12\r\n```\r\n\r\nCall [XBINDX](../../../kernel/primitives/xbindx/) kernel function.\r\n"
},
{
"alpha_fraction": 0.33476394414901733,
"alphanum_fraction": 0.4163089990615845,
"avg_line_length": 9.1304349899292,
"blob_id": "052b3dea83c8c87711c8b8addc9de0e9dd5c6dd2",
"content_id": "aa1efda63edd97e1beff6829f467e53440a48805",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Markdown",
"length_bytes": 233,
"license_type": "no_license",
"max_line_length": 65,
"num_lines": 23,
"path": "/docs/tools_docs/vi/Files/dir_834496eb029ed14441e8790c53896f5f.md",
"repo_name": "orix-software/orix-software.github.io",
"src_encoding": "UTF-8",
"text": "---\ntitle: /data/vi\n\n---\n\n# /data/vi\n\n\n\n## Files\n\n| Name |\n| -------------- |\n| **[/data/vi/strings.s](Files/strings_8s.md#file-strings.s)** |\n\n\n\n\n\n\n-------------------------------\n\nUpdated on 2022-12-15 at 14:20:17 +0100\n"
},
{
"alpha_fraction": 0.6605113744735718,
"alphanum_fraction": 0.7144886255264282,
"avg_line_length": 25.037036895751953,
"blob_id": "99784bc2aefa02aa52049a01e1deb63975260289",
"content_id": "383fdafa5fc15b8257d3281a5851b0d93d60ef3d",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Markdown",
"length_bytes": 704,
"license_type": "no_license",
"max_line_length": 163,
"num_lines": 27,
"path": "/docs/hardware/i_o.md",
"repo_name": "orix-software/orix-software.github.io",
"src_encoding": "UTF-8",
"text": "# I/O\n\nThe I/O ports can be see also when you type\n\n``` bash\n\n/#ioports\n\n```\n\n## $321 register for the id of the bank to switch\n\nb0, b1, b2 which can select the id of the bank wanted.\n\n## $314 register\n\nThis register is in firmware 2, and can mirror $314 calls to change to overlay memory\n\n## $342 : twilighte board register\n\n* b0,b1,b2 version number is equal to 1 or 2, but can be overwritten (There is not read only protection\n\n* b5 : if it's equal to 0 if we are on eeprom set, 1 if it's ram set\n\n## $343 : Twilighte banking register\n\nThis register can contains values from 0 to 7 included. It defines the \"set\" of 64KB which be present in slot 4,3,2 and 1 of register $321 which contains the bank.\n\n"
},
{
"alpha_fraction": 0.7142857313156128,
"alphanum_fraction": 0.7179023623466492,
"avg_line_length": 22.04347801208496,
"blob_id": "64b01dfddd9b7e5232845ef312039eebf464417f",
"content_id": "15580742988ea76e0c9c00707b1bf72fc8b96c38",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Markdown",
"length_bytes": 553,
"license_type": "no_license",
"max_line_length": 165,
"num_lines": 23,
"path": "/docs/commands/ftdos.md",
"repo_name": "orix-software/orix-software.github.io",
"src_encoding": "UTF-8",
"text": "# Ftdos\r\n\r\n## Introduction\r\n\r\nftdos command starts ftdos .dsk file without jasmin floppy disc controler. It's a beta version.\r\n\r\n## Usage\r\n\r\nTo start a .dsk file\r\n\r\n/#ftdos mydsk\r\n\r\nTo display the ftdos command version :\r\n\r\n/#ftdos -v\r\n\r\nftdos disks must be present in /home/basic11/dsk/\r\n\r\n## Informations\r\n\r\n* Write on .dsk file is not supported\r\n* refers to ftdos manual for usages\r\n* you need to switch off/on the oric when you reset into ftdos mode, because there is a bug when we reset during ftdos mode. Ftdos does not load and reboot into Orix\r\n"
},
{
"alpha_fraction": 0.5927419066429138,
"alphanum_fraction": 0.6693548560142517,
"avg_line_length": 13.529411315917969,
"blob_id": "07bd7092f874e3ffdb2d18adee2d9ec418d460f4",
"content_id": "aacb42569240111d48ad3af363e96d7bdf4df1fc",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Markdown",
"length_bytes": 248,
"license_type": "no_license",
"max_line_length": 54,
"num_lines": 17,
"path": "/docs/hardware/eeprom_structure.md",
"repo_name": "orix-software/orix-software.github.io",
"src_encoding": "UTF-8",
"text": "# eeprom structure\n\n112KB\n\nThe eeprom structure is (minimum requirement to boot):\n\n* First bank (16KB) : empty\n\n* First bank (16KB) : empty\n\n* First bank (16KB) : empty\n\n* First bank (16KB) : empty\n\n* Shell (16KB)\n* basic11 (16KB)\n* Kernel (16KB)\n\n"
},
{
"alpha_fraction": 0.5519999861717224,
"alphanum_fraction": 0.7059999704360962,
"avg_line_length": 82.33333587646484,
"blob_id": "aee8ef92eba7754d4c2d45abf2cca4fd8f51eb29",
"content_id": "c1226cf4b6eeaa3b09b675cb9e77e8b43b2e4557",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "JavaScript",
"length_bytes": 500,
"license_type": "no_license",
"max_line_length": 181,
"num_lines": 6,
"path": "/doxygen/doc/html/search/functions_0.js",
"repo_name": "orix-software/orix-software.github.io",
"src_encoding": "UTF-8",
"text": "var searchData=\n[\n ['vi_5fkey_5fdown_148',['vi_key_down',['../vi__key__down_8s.html#a1bcbd00ec21382f851aa07bd150bc98f',1,'vi_key_down.s']]],\n ['vi_5fypos_5fscreen_5fplus_5fplus_149',['vi_ypos_screen_plus_plus',['../vi__ypos__screen__plus__plus_8s.html#afc0b96c53386e4c2e026f95427675d33',1,'vi_ypos_screen_plus_plus.s']]],\n ['vi_5fypos_5fscreen_5fsub_5fsub_150',['vi_ypos_screen_sub_sub',['../vi__ypos__screen__sub__sub_8s.html#ac8b8fb96fac83eef9bb4e1fc0fcdee4a',1,'vi_ypos_screen_sub_sub.s']]]\n];\n"
},
{
"alpha_fraction": 0.4881141185760498,
"alphanum_fraction": 0.5261489748954773,
"avg_line_length": 13.674418449401855,
"blob_id": "bdc7243ec89f049b8ec3d0317a33ccd347d1c69b",
"content_id": "3970e75c451cca31412a635dd4fbc4c917a1e47d",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Markdown",
"length_bytes": 631,
"license_type": "no_license",
"max_line_length": 118,
"num_lines": 43,
"path": "/docs/tools_docs/vi/Files/vi__ptr__file__used__plus__plus_8s.md",
"repo_name": "orix-software/orix-software.github.io",
"src_encoding": "UTF-8",
"text": "---\ntitle: vi_ptr_file_used_plus_plus.s\n\n---\n\n# vi_ptr_file_used_plus_plus.s\n\n\n\n## Routine\n\n| | Name |\n| -------------- | -------------- |\n| | **[vi_ptr_file_used_plus_plus](Files/vi__ptr__file__used__plus__plus_8s.md#Routine-vi-ptr-file-used-plus-plus)** |\n\n\n## Routine documentation\n\n### Routine vi_ptr_file_used_plus_plus\n\n```ca65\nvi_ptr_file_used_plus_plus\n```\n\n\n\n\n## Source code\n\n```ca65\n.proc vi_ptr_file_used_plus_plus\n inc vi_ptr_file_used\n bne @out\n inc vi_ptr_file_used+1\n@out:\n rts\n.endproc\n```\n\n\n-------------------------------\n\nUpdated on 2022-12-15 at 14:20:17 +0100\n"
},
{
"alpha_fraction": 0.5255952477455139,
"alphanum_fraction": 0.5392857193946838,
"avg_line_length": 17.66666603088379,
"blob_id": "d66f89e2cbe139a073ee850804fac424f99da69c",
"content_id": "de8f713fdff7dc310e353ceae2e6f07faf829b38",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Markdown",
"length_bytes": 1680,
"license_type": "no_license",
"max_line_length": 109,
"num_lines": 90,
"path": "/doxygen/doxybook_output/Files/vi__init__vi__struct_8s.md",
"repo_name": "orix-software/orix-software.github.io",
"src_encoding": "UTF-8",
"text": "---\ntitle: /mnt/c/Users/plifp/OneDrive/oric/projets/orix-software/vi/src/functions/subfunc/vi/vi_init_vi_struct.s\n\n---\n\n# /mnt/c/Users/plifp/OneDrive/oric/projets/orix-software/vi/src/functions/subfunc/vi/vi_init_vi_struct.s\n\n\n\n## Functions\n\n| | Name |\n| -------------- | -------------- |\n| | **[vi_init_vi_struct](Files/vi__init__vi__struct_8s.md#function-vi-init-vi-struct)**() |\n\n\n## Functions Documentation\n\n### function vi_init_vi_struct\n\n```cpp\nvi_init_vi_struct()\n```\n\n\n\n\n## Source code\n\n```cpp\n.proc vi_init_vi_struct\n\n; init struct to 0\n jsr vi_set_xpos_0\n\n ldy #vi_struct_data::ypos_screen\n sta (vi_struct),y\n\n ldy #vi_struct_data::xpos_text\n sta (vi_struct),y\n\n ldy #vi_struct_data::ypos_text\n sta (vi_struct),y\n\n ldy #vi_struct_data::xpos_command_line\n sta (vi_struct),y\n\n ldy #vi_struct_data::pos_file_addr\n sta (vi_struct),y\n iny\n sta (vi_struct),y\n\n ldy #vi_struct_data::pos_file\n sta (vi_struct),y\n iny\n sta (vi_struct),y\n iny\n sta (vi_struct),y\n iny\n sta (vi_struct),y\n\n ldy #vi_struct_data::posx_command_line\n sta (vi_struct),y\n\n ldy #vi_struct_data::name_file_open\n sta (vi_struct),y\n\n ldy #vi_struct_data::length_file\n sta (vi_struct),y\n iny\n sta (vi_struct),y\n iny\n sta (vi_struct),y\n iny\n sta (vi_struct),y\n\n ldy #vi_struct_data::ptr_last_char_file\n lda #$00\n sta (vi_struct),y\n iny\n sta (vi_struct),y\n\n rts\n.endproc\n```\n\n\n-------------------------------\n\nUpdated on 2022-12-15 at 11:48:27 +0100\n"
},
{
"alpha_fraction": 0.7179487347602844,
"alphanum_fraction": 0.7179487347602844,
"avg_line_length": 6.800000190734863,
"blob_id": "5f414c0f9ba8727965d00e76c7bb0d6d6bc971d9",
"content_id": "37c55004437e41bbbd6663c5dcf50b0b5fac98c4",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Markdown",
"length_bytes": 39,
"license_type": "no_license",
"max_line_length": 13,
"num_lines": 5,
"path": "/docs/commands/zerofx.md",
"repo_name": "orix-software/orix-software.github.io",
"src_encoding": "UTF-8",
"text": "# ZeroFX demo\n\nLaunch zerofx\n\n/#zerofx\n"
},
{
"alpha_fraction": 0.48132336139678955,
"alphanum_fraction": 0.5048025846481323,
"avg_line_length": 14.032085418701172,
"blob_id": "cc12d84a7a3e32d6a46dece7af2751bb5526b6f8",
"content_id": "03080b019a099a63d30b19cf6bde7b8a7e4d6fbe",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Markdown",
"length_bytes": 2811,
"license_type": "no_license",
"max_line_length": 95,
"num_lines": 187,
"path": "/doxygen/doxybook_output_vi/Files/vi__key__down_8s.md",
"repo_name": "orix-software/orix-software.github.io",
"src_encoding": "UTF-8",
"text": "---\ntitle: vi_key_down.s\n\n---\n\n# vi_key_down.s\n\n\n\n## Routine\n\n| | Name |\n| -------------- | -------------- |\n| | **[vi_key_down](Files/vi__key__down_8s.md#Routine-vi-key-down)**<br>Key down management. |\n\n\n## Routine documentation\n\n### Routine vi_key_down\n\n```ca65\nvi_key_down\n```\n\nKey down management. \n\n[vi_strlen_current_line]\n\n\n\n\n## Source code\n\n```ca65\n;; Key down management\n;\n;@link\n;vi_strlen_current_line\n;@endlink\n;;\n\n.proc vi_key_down\n jsr vi_editor_switch_off_cursor\n ; Checking if we can go below\n\n ; if eof then goto eof\n jsr vi_check_eof\n cmp #IS_EOF\n beq @eof\n\n ; x=0\n jsr vi_set_xpos_0\n\n ; y++\n jsr vi_ypos_screen_plus_plus\n ; Last line ?\n cmp #IS_LAST_LINE_OF_SCREEN_TEXT\n bne @continue_down\n\n scroll up, 0, 26 ; Yes scroll\n\n\n\n@search_eof:\n ldx #$00\n@L100:\n ; ptr_file++\n\n jsr vi_check_eof\n cmp #IS_EOF\n beq @eof\n\n ldy #$00\n lda (vi_ptr_file_used),y\n cmp #CR\n beq @display_line\n\n jsr vi_vi_ptr_file_used_plus_plus\n inx\n cpx #VI_EDITOR_MAX_COLUMN\n beq @display_line\n jmp @L100\n\n@eof:\n\n rts\n\n@display_line:\n\n@start_display_line:\n iny\n jsr vi_check_0A\n\n lda vi_ptr_file_used\n ldy vi_ptr_file_used+1\n\n jsr vi_fill_last_line ; And displays the line\n\n jsr vi_check_0A\n\n jsr vi_check_eof\n cmp #IS_EOF\n beq @eof\n\n jsr vi_check_if_previous_line_was_truncated\n\n rts\n@exit_display_line:\n\n@exit_near:\n rts\n\n@continue_down:\n ldx #$00\n@L1:\n jsr vi_check_eof\n cmp #IS_EOF\n beq @eof\n\n ldy #$00\n lda (vi_ptr_file_used),y\n cmp #CR\n beq @found_0D\n\n@not_eof2:\n inx\n cpx #VI_EDITOR_MAX_COLUMN+1\n bne @not_eol\n jsr vi_set_xpos_0\n\n ; y++\n jsr vi_ypos_screen_plus_plus\n ldx #$00\n\n@not_eol:\n jsr vi_vi_ptr_file_used_plus_plus\n jmp @L1\n\n ; Checking if eof\n\n@found_0D:\n jsr vi_vi_ptr_file_used_plus_plus\n\n jsr vi_check_eof\n cmp #IS_EOF\n beq @eof\n\n ldy #$00\n lda (vi_ptr_file_used),y\n cmp #LF\n bne @S20\n jsr vi_vi_ptr_file_used_plus_plus\n\n@S20:\n jsr vi_check_eof\n cmp #IS_EOF\n beq @eof\n\n ldy #$00\n lda (vi_ptr_file_used),y\n cmp #LF\n bne @not_eof\n\n jsr vi_vi_ptr_file_used_plus_plus\n\n jmp @S20\n\n@not_eof:\n ldy #vi_struct_data::xpos_screen\n lda (vi_struct),y\n cmp #$00\n beq @no_compute\n tax\n@add:\n jsr vi_vi_ptr_file_used_plus_plus\n dex\n bne @add\n\n@no_compute:\n rts\n.endproc\n```\n\n\n-------------------------------\n\nUpdated on 2022-12-15 at 14:20:17 +0100\n"
},
{
"alpha_fraction": 0.6893396973609924,
"alphanum_fraction": 0.7187748551368713,
"avg_line_length": 21.446428298950195,
"blob_id": "31125eb88ddbb5a01d27d92338311849db0159f3",
"content_id": "481879b1d4aeac78bac0dd5e802bcc0c35016e98",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Markdown",
"length_bytes": 2514,
"license_type": "no_license",
"max_line_length": 196,
"num_lines": 112,
"path": "/docs/update/2022_3.md",
"repo_name": "orix-software/orix-software.github.io",
"src_encoding": "UTF-8",
"text": "# v2022.3\n\n## Kernel\n\n* [XMKDIR] Fix registers\n\n* [XATN] Arc Tan routine removed\n\n* [XCOS] Cosinus routine removed\n\n* [XSIN] sinus routine removed\n\n* [XLN] Ln routine removed\n\n* [XLOG] Log routine removed\n\n* [XEXP] Exp routine removed\n\n* [XOPEN] Now, WR_ONLY Flag does not create the file. O_CREAT is handled and create the file\n\n* [XFSEEK] now works in 32 bits mode (return EOK if OK, EINVAL if whence is not recognize)\n\n* [XOPEN] [XREAD] [XCLOSE] Allows to open 2 files at the same times\n\n* [XFREE] Fix many bugs\n\n* [Load from device] Add magic token to start any binary without checks\n\n* Can open 2 files at the same times\n\n* Many bugs corrected in XFREE primitive memory management\n\n* FSEEK primitive to move into the opened files\n\n## CC65 (telestrat target)\n\n* [cc65] Fix mkdir bug\n\n* [cc65] now send correct fd for fwrite/fopen/fread to the kernel\n\n* [cc65] kbhit has the right behavior now\n\n## Shell\n\n* Add readline (command line behavior) : insert/replace mode move to end of the line, at the beginning etc (see : sh - Orix (orix-software.github.io) code : Assinie)\n\n* update man pages\n\nReadline : \n\n\n\n## Man\n\n* man removes shell ptr and no others command were working, when man is used without any argument => fixed\n\n* man can displays now more than one screen\n\n## Loader (funct+L)\n\n* Displays the version of the loader\n\n* When we press space the informations(and comments) of the software is displayed. Up and down arrows are availables to navigate into this page (788 games, 60 roms, 35 demos, 142 tools, 80 musics)\n\n* When a letter is pressed, it goes to the first entry of this letter.\n\n\n\n## New command line tool : grep\n\nGrep search pattern into file (grep is a tool to find pattern into file.)\n\n\n\n## New command line tool : untar\n\ncan untar tar files\n\n\n\n## New command line tool : submit\n\nsubmit can launch script file(Submit is a script management system in order to launch command like a shell script)\n\n\n\n## How to upgrade\n\n* Download https://repo.orix.oric.org/dists/official/tgz/6502/sdcard.tgz\n\n* gunzip/untar sdcard.tgz on the twilighte board device\n* type in the orix shell :\n - cd /usr/share/carts/2022.3\n\nif the default device is a usb key\n\n```bash\n/# orixcfg -r -s 4 kernelus.r64\n```\n\nif the default device is a sdcard\n\n```bash\n/#orixcfg -r -s 4 kernelsd.r64\n```\n\n## Convertion\n\n* Pushing the envelop\n* born in 1983\n* Blake's 7\n* Oric tech ...\n"
},
{
"alpha_fraction": 0.7437751293182373,
"alphanum_fraction": 0.7453815340995789,
"avg_line_length": 39.43333435058594,
"blob_id": "23e045e28d0a4f009418f8f10db646c831028a0e",
"content_id": "c2c510a736aabef748ca8bcb1653b69bde4100c1",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Markdown",
"length_bytes": 1245,
"license_type": "no_license",
"max_line_length": 228,
"num_lines": 30,
"path": "/docs/developer_manual/buildstandalonerom.md",
"repo_name": "orix-software/orix-software.github.io",
"src_encoding": "UTF-8",
"text": "# Build a standalone ROM\r\n\r\n## Prerequisites\r\n\r\nBefore starting :\r\n\r\n* Read [Oricutron VS Real hardware](oricutronvsreal.md)\r\n\r\n## Guidelines\r\n\r\nA standalone ROM is a ROM which does not need the kernel and manage the fully main memory.\r\n\r\nIt's not the main choice when a ROM is built, but it's used to handle easily compatibility for \"Legacy roms\" as Atmos Rom, Oric-1 and others customs roms released.\r\n\r\nIt means that all code to manage files, joysticks, text I/O, Hires I/O must be inserted in this rom.\r\n\r\nThis mode must avoided because it won't handle evolution handled in the kernel and the Twilighte board\r\n\r\n## Access to sdcard and usbdrive\r\n\r\nOrix and kernel handles devices : usbdrive (Usbkey) and sdcard. When a standalone rom is built, it must keep default device, in that case, it requires to ask to the kernel which is the default device before flushing main memory.\r\n\r\n## Launching a standalone ROM\r\n\r\nIf you are on Oricutron, you can put in bank 7, your rom and start Oricutron in twilighte board mode.\r\nOn real computer, you need to\r\n\r\n* copy your ROM into a folder (ex : /usr/share/myroms/myrom.rom)\r\n* Modify /etc/systemd/banks.cnf to add a new rom entry\r\n* Type funct + L and you will be able to start your standalone ROM\r\n\r\n"
},
{
"alpha_fraction": 0.47310683131217957,
"alphanum_fraction": 0.5364004969596863,
"avg_line_length": 194.33766174316406,
"blob_id": "207270ab28d28a6e971fc40439803a0d2c4e1412",
"content_id": "7ca84f2fd6236f0012f6e6197b701e19fb70b25a",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "HTML",
"length_bytes": 15054,
"license_type": "no_license",
"max_line_length": 12033,
"num_lines": 77,
"path": "/doxygen/kernel/xink_8asm_source.html",
"repo_name": "orix-software/orix-software.github.io",
"src_encoding": "UTF-8",
"text": "<!DOCTYPE html PUBLIC \"-//W3C//DTD XHTML 1.0 Transitional//EN\" \"http://www.w3.org/TR/xhtml1/DTD/xhtml1-transitional.dtd\">\n<html xmlns=\"http://www.w3.org/1999/xhtml\">\n<head>\n<meta http-equiv=\"Content-Type\" content=\"text/xhtml;charset=UTF-8\"/>\n<meta http-equiv=\"X-UA-Compatible\" content=\"IE=9\"/>\n<meta name=\"generator\" content=\"Doxygen 1.8.13\"/>\n<meta name=\"viewport\" content=\"width=device-width, initial-scale=1\"/>\n<title>Orix : Kernel: /home/jede/oric/kernel/src/functions/text/xink.asm Source File</title>\n<link href=\"tabs.css\" rel=\"stylesheet\" type=\"text/css\"/>\n<script type=\"text/javascript\" src=\"jquery.js\"></script>\n<script type=\"text/javascript\" src=\"dynsections.js\"></script>\n<link href=\"search/search.css\" rel=\"stylesheet\" type=\"text/css\"/>\n<script type=\"text/javascript\" src=\"search/searchdata.js\"></script>\n<script type=\"text/javascript\" src=\"search/search.js\"></script>\n<link href=\"doxygen.css\" rel=\"stylesheet\" type=\"text/css\" />\n</head>\n<body>\n<div id=\"top\"><!-- do not remove this div, it is closed by doxygen! -->\n<div id=\"titlearea\">\n<table cellspacing=\"0\" cellpadding=\"0\">\n <tbody>\n <tr style=\"height: 56px;\">\n <td id=\"projectalign\" style=\"padding-left: 0.5em;\">\n <div id=\"projectname\">Orix : Kernel\n </div>\n </td>\n </tr>\n </tbody>\n</table>\n</div>\n<!-- end header part -->\n<!-- Generated by Doxygen 1.8.13 -->\n<script type=\"text/javascript\">\nvar searchBox = new SearchBox(\"searchBox\", \"search\",false,'Search');\n</script>\n<script type=\"text/javascript\" src=\"menudata.js\"></script>\n<script type=\"text/javascript\" src=\"menu.js\"></script>\n<script type=\"text/javascript\">\n$(function() {\n initMenu('',true,false,'search.php','Search');\n $(document).ready(function() { init_search(); });\n});\n</script>\n<div id=\"main-nav\"></div>\n<!-- window showing the filter options -->\n<div id=\"MSearchSelectWindow\"\n onmouseover=\"return searchBox.OnSearchSelectShow()\"\n onmouseout=\"return searchBox.OnSearchSelectHide()\"\n onkeydown=\"return searchBox.OnSearchSelectKey(event)\">\n</div>\n\n<!-- iframe showing the search results (closed by default) -->\n<div id=\"MSearchResultsWindow\">\n<iframe src=\"javascript:void(0)\" frameborder=\"0\" \n name=\"MSearchResults\" id=\"MSearchResults\">\n</iframe>\n</div>\n\n<div id=\"nav-path\" class=\"navpath\">\n <ul>\n<li class=\"navelem\"><a class=\"el\" href=\"dir_68267d1309a1af8e8297ef4c3efbcdba.html\">src</a></li><li class=\"navelem\"><a class=\"el\" href=\"dir_e823141bb13a34caac0c96ccd0d33fcf.html\">functions</a></li><li class=\"navelem\"><a class=\"el\" href=\"dir_c5919dd578b1c6915af72ff005460d39.html\">text</a></li> </ul>\n</div>\n</div><!-- top -->\n<div class=\"header\">\n <div class=\"headertitle\">\n<div class=\"title\">xink.asm</div> </div>\n</div><!--header-->\n<div class=\"contents\">\n<div class=\"fragment\"><div class=\"line\"><a name=\"l00001\"></a><span class=\"lineno\"> 1</span> </div><div class=\"line\"><a name=\"l00002\"></a><span class=\"lineno\"> 2</span> .proc XINK_ROUTINE</div><div class=\"line\"><a name=\"l00003\"></a><span class=\"lineno\"> 3</span>  sec</div><div class=\"line\"><a name=\"l00004\"></a><span class=\"lineno\"> 4</span> </div><div class=\"line\"><a name=\"l00005\"></a><span class=\"lineno\"> 5</span> ; FIXE LA COULEUR DE FOND OU DU TEXTE </div><div class=\"line\"><a name=\"l00006\"></a><span class=\"lineno\"> 6</span> </div><div class=\"line\"><a name=\"l00007\"></a><span class=\"lineno\"> 7</span> ;Principe:A contient la couleur, X la fenêtre ou 128 si mode HIRES et C=1 si la </div><div class=\"line\"><a name=\"l00008\"></a><span class=\"lineno\"> 8</span> ;couleur est pour l<span class=\"stringliteral\">'encre, 0 pour le fond. </span></div><div class=\"line\"><a name=\"l00009\"></a><span class=\"lineno\"> 9</span> <span class=\"stringliteral\">; Changer la couleur consiste à remplir la colonne couleur correspondante</span></div><div class=\"line\"><a name=\"l00010\"></a><span class=\"lineno\"> 10</span> <span class=\"stringliteral\">; avec le code de couleur. Auncun test de validit? n'</span>étant fait, on peut </div><div class=\"line\"><a name=\"l00011\"></a><span class=\"lineno\"> 11</span> ; utiliser ce moyen pour remplir les colonnes 0 et 1 de n<span class=\"stringliteral\">'importe quel </span></div><div class=\"line\"><a name=\"l00012\"></a><span class=\"lineno\"> 12</span> <span class=\"stringliteral\">; attribut. </span></div><div class=\"line\"><a name=\"l00013\"></a><span class=\"lineno\"> 13</span> <span class=\"stringliteral\"></span></div><div class=\"line\"><a name=\"l00014\"></a><span class=\"lineno\"> 14</span> <span class=\"stringliteral\"> pha ; on sauve la couleur </span></div><div class=\"line\"><a name=\"l00015\"></a><span class=\"lineno\"> 15</span> <span class=\"stringliteral\"> php ; et C </span></div><div class=\"line\"><a name=\"l00016\"></a><span class=\"lineno\"> 16</span> <span class=\"stringliteral\"> stx RES ; fenêtre dans RES </span></div><div class=\"line\"><a name=\"l00017\"></a><span class=\"lineno\"> 17</span> <span class=\"stringliteral\"> bit RES ; HIRES ? </span></div><div class=\"line\"><a name=\"l00018\"></a><span class=\"lineno\"> 18</span> <span class=\"stringliteral\"> bmi @S4 ; oui ---------------------------------------------- </span></div><div class=\"line\"><a name=\"l00019\"></a><span class=\"lineno\"> 19</span> <span class=\"stringliteral\"> stx SCRNB ; TEXT, on met le num?ro de fen?tre dans $28 I</span></div><div class=\"line\"><a name=\"l00020\"></a><span class=\"lineno\"> 20</span> <span class=\"stringliteral\"> bcc @S2 ; si C=0, c'</span>est PAPER I </div><div class=\"line\"><a name=\"l00021\"></a><span class=\"lineno\"> 21</span>  sta SCRCT,X ; on stocke la couleur d<span class=\"stringliteral\">'encre I</span></div><div class=\"line\"><a name=\"l00022\"></a><span class=\"lineno\"> 22</span> <span class=\"stringliteral\"> bcs @S1 ; si C=1 c'</span>est INK I </div><div class=\"line\"><a name=\"l00023\"></a><span class=\"lineno\"> 23</span> @S2:</div><div class=\"line\"><a name=\"l00024\"></a><span class=\"lineno\"> 24</span>  sta SCRCF,X ; ou la couleur de fond </div><div class=\"line\"><a name=\"l00025\"></a><span class=\"lineno\"> 25</span> @S1:</div><div class=\"line\"><a name=\"l00026\"></a><span class=\"lineno\"> 26</span>  lda FLGSCR,X ; est on en 38 colonnes ? I</div><div class=\"line\"><a name=\"l00027\"></a><span class=\"lineno\"> 27</span>  and #$10 ; I</div><div class=\"line\"><a name=\"l00028\"></a><span class=\"lineno\"> 28</span>  bne @S3 ; mode 38 colonnes ------------------------------ I</div><div class=\"line\"><a name=\"l00029\"></a><span class=\"lineno\"> 29</span>  lda #$0C ; mode 40 colonnes, on efface l<span class=\"stringliteral\">'?cran I I</span></div><div class=\"line\"><a name=\"l00030\"></a><span class=\"lineno\"> 30</span> <span class=\"stringliteral\"> jsr Ldbb5 ; (on envoie CHR$(12)) I I </span></div><div class=\"line\"><a name=\"l00031\"></a><span class=\"lineno\"> 31</span> <span class=\"stringliteral\"> lda #$1D ; et on passe en 38 colonnes I I</span></div><div class=\"line\"><a name=\"l00032\"></a><span class=\"lineno\"> 32</span> <span class=\"stringliteral\"> jsr Ldbb5 ; (on envoie CHR$(29)) I I </span></div><div class=\"line\"><a name=\"l00033\"></a><span class=\"lineno\"> 33</span> <span class=\"stringliteral\"> ldx SCRNB ; on prend X=numéro de fen?tre I I</span></div><div class=\"line\"><a name=\"l00034\"></a><span class=\"lineno\"> 34</span> <span class=\"stringliteral\">@S3:</span></div><div class=\"line\"><a name=\"l00035\"></a><span class=\"lineno\"> 35</span> <span class=\"stringliteral\"> lda SCRDY,X ; on prend la ligne 0 de la fenêtre <------------ I</span></div><div class=\"line\"><a name=\"l00036\"></a><span class=\"lineno\"> 36</span> <span class=\"stringliteral\"> jsr XMUL40_ROUTINE ; *40 dans RES I </span></div><div class=\"line\"><a name=\"l00037\"></a><span class=\"lineno\"> 37</span> <span class=\"stringliteral\"> lda SCRBAL,X ; AY=adresse de base de la fenêtre I</span></div><div class=\"line\"><a name=\"l00038\"></a><span class=\"lineno\"> 38</span> <span class=\"stringliteral\"> ldy SCRBAH,X ; I</span></div><div class=\"line\"><a name=\"l00039\"></a><span class=\"lineno\"> 39</span> <span class=\"stringliteral\"> jsr XADRES_ROUTINE ; on ajoute l'</span>adresse à RES (ligne 0 *40) dans RES I </div><div class=\"line\"><a name=\"l00040\"></a><span class=\"lineno\"> 40</span>  ldy SCRDX,X ; on prend la première colonne de la fenêtre I</div><div class=\"line\"><a name=\"l00041\"></a><span class=\"lineno\"> 41</span>  dey ; on enlève deux colonnes I</div><div class=\"line\"><a name=\"l00042\"></a><span class=\"lineno\"> 42</span>  dey ; I</div><div class=\"line\"><a name=\"l00043\"></a><span class=\"lineno\"> 43</span>  sec ; I</div><div class=\"line\"><a name=\"l00044\"></a><span class=\"lineno\"> 44</span>  lda SCRFY,X ; on calcule le nombre de lignes I</div><div class=\"line\"><a name=\"l00045\"></a><span class=\"lineno\"> 45</span>  sbc SCRDY,X ; de la fenêtre I</div><div class=\"line\"><a name=\"l00046\"></a><span class=\"lineno\"> 46</span>  tax ; dans X I</div><div class=\"line\"><a name=\"l00047\"></a><span class=\"lineno\"> 47</span>  inx ; I</div><div class=\"line\"><a name=\"l00048\"></a><span class=\"lineno\"> 48</span>  tya ; colonne 0 dans Y I</div><div class=\"line\"><a name=\"l00049\"></a><span class=\"lineno\"> 49</span>  bcs @S5 ; inconditionnel --------------------------------- I </div><div class=\"line\"><a name=\"l00050\"></a><span class=\"lineno\"> 50</span> @S4:</div><div class=\"line\"><a name=\"l00051\"></a><span class=\"lineno\"> 51</span>  lda #$00 ; <----------------------------------------------+-- FIXME 65C02</div><div class=\"line\"><a name=\"l00052\"></a><span class=\"lineno\"> 52</span>  ldx #$A0 ; I </div><div class=\"line\"><a name=\"l00053\"></a><span class=\"lineno\"> 53</span>  sta RES ; RES=$A000 , adresse HIRES I </div><div class=\"line\"><a name=\"l00054\"></a><span class=\"lineno\"> 54</span>  stx RES+1 ; I </div><div class=\"line\"><a name=\"l00055\"></a><span class=\"lineno\"> 55</span>  ldx #$C8 ; X=200 pour 200 lignes I </div><div class=\"line\"><a name=\"l00056\"></a><span class=\"lineno\"> 56</span>  lda #$00 ; A=0 pour colonne de début = colonne 0 I </div><div class=\"line\"><a name=\"l00057\"></a><span class=\"lineno\"> 57</span> @S5:</div><div class=\"line\"><a name=\"l00058\"></a><span class=\"lineno\"> 58</span>  plp ; on sort C <------------------------------------- </div><div class=\"line\"><a name=\"l00059\"></a><span class=\"lineno\"> 59</span>  adc #$00 ; A=A+C </div><div class=\"line\"><a name=\"l00060\"></a><span class=\"lineno\"> 60</span>  tay ; dans Y </div><div class=\"line\"><a name=\"l00061\"></a><span class=\"lineno\"> 61</span>  pla ; on sort le code *</div><div class=\"line\"><a name=\"l00062\"></a><span class=\"lineno\"> 62</span> @S7:</div><div class=\"line\"><a name=\"l00063\"></a><span class=\"lineno\"> 63</span>  sta (RES),Y; -->on le place dans la colonne correspondante </div><div class=\"line\"><a name=\"l00064\"></a><span class=\"lineno\"> 64</span>  pha ; I on le sauve </div><div class=\"line\"><a name=\"l00065\"></a><span class=\"lineno\"> 65</span>  clc ; I </div><div class=\"line\"><a name=\"l00066\"></a><span class=\"lineno\"> 66</span>  lda RES ; I on passe 28 colonnes </div><div class=\"line\"><a name=\"l00067\"></a><span class=\"lineno\"> 67</span>  adc #$28 ;I (donc une ligne) </div><div class=\"line\"><a name=\"l00068\"></a><span class=\"lineno\"> 68</span>  sta RES ;I </div><div class=\"line\"><a name=\"l00069\"></a><span class=\"lineno\"> 69</span>  bcc @S6 ; I </div><div class=\"line\"><a name=\"l00070\"></a><span class=\"lineno\"> 70</span>  inc RES+1 ; I </div><div class=\"line\"><a name=\"l00071\"></a><span class=\"lineno\"> 71</span> @S6:</div><div class=\"line\"><a name=\"l00072\"></a><span class=\"lineno\"> 72</span>  pla ; I on sort le code </div><div class=\"line\"><a name=\"l00073\"></a><span class=\"lineno\"> 73</span>  dex ; I on compte X lignes </div><div class=\"line\"><a name=\"l00074\"></a><span class=\"lineno\"> 74</span>  bne @S7 ;--- </div><div class=\"line\"><a name=\"l00075\"></a><span class=\"lineno\"> 75</span>  rts ; et on sort----------------------------------------</div><div class=\"line\"><a name=\"l00076\"></a><span class=\"lineno\"> 76</span> .endproc</div></div><!-- fragment --></div><!-- contents -->\n<!-- start footer part -->\n<hr class=\"footer\"/><address class=\"footer\"><small>\nGenerated on Sat Apr 25 2020 11:13:33 for Orix : Kernel by  <a href=\"http://www.doxygen.org/index.html\">\n<img class=\"footer\" src=\"doxygen.png\" alt=\"doxygen\"/>\n</a> 1.8.13\n</small></address>\n</body>\n</html>\n"
},
{
"alpha_fraction": 0.6666666865348816,
"alphanum_fraction": 0.6910569071769714,
"avg_line_length": 15.571428298950195,
"blob_id": "fa9036e6c2746112ff278c389b9f3c01ecd55112",
"content_id": "e72a7d01ade167c107dd84a65c965dbd65b4a83a",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Markdown",
"length_bytes": 123,
"license_type": "no_license",
"max_line_length": 42,
"num_lines": 7,
"path": "/kernel/docs/dynamiclink/XCALLFUNCTLIB.md",
"repo_name": "orix-software/orix-software.github.io",
"src_encoding": "UTF-8",
"text": "# XCALLFUNCTLIB\r\n\r\nCheck if the id of the function is valid ?\r\n\r\nCalls always $c000 of the bank ?\r\n\r\nReturns error code ?\r\n"
},
{
"alpha_fraction": 0.7085156440734863,
"alphanum_fraction": 0.7384718656539917,
"avg_line_length": 111.07546997070312,
"blob_id": "9d0d553b15ac806fd089869bbfa4518ff64e1c9d",
"content_id": "d9d1422cceb866b52be0cdfb441b35b19b33f2e9",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Markdown",
"length_bytes": 6065,
"license_type": "no_license",
"max_line_length": 327,
"num_lines": 53,
"path": "/docs/hardware/ch376/vars8.md",
"repo_name": "orix-software/orix-software.github.io",
"src_encoding": "UTF-8",
"text": "# CH376 extra vars\n\n## Vars 8\n\n```c\n#define VAR_SYS_BASE_INFO 0x20 / * Informations de base du système actuel * /\n/ * Le bit 6 est utilisé pour indiquer la sous-classe du périphérique de stockage USB SubClass-Code, le bit 6 est 0 pour indiquer que la sous-classe est 6 et le bit 6 est 1 pour indiquer que la sous-classe est différente de 6 * /\n/ * Le bit 5 est utilisé pour indiquer l'état de la configuration USB en mode périphérique USB et l'état de la connexion du périphérique USB en mode hôte USB * /\n/ * En mode périphérique USB, si le bit 5 est 1, la configuration USB est terminée et les bits 5 et 0 ne sont pas configurés * /\n/ * En mode hôte USB, si le bit 5 est 1, il y a un périphérique USB dans le port USB, et si le bit 5 est 0, il n'y a pas de périphérique USB dans le port USB * /\n/ * Le bit 4 est utilisé pour indiquer l'état de verrouillage du tampon en mode périphérique USB. Le bit 4 est 1 signifie que le tampon USB est verrouillé et le bit 6 est 1 signifie qu'il a été libéré * /\n/ * Autres bits, réservés, veuillez ne pas modifier * /\n#define VAR_RETRY_TIMES 0x25 / * Nombre de tentatives d'opération de transaction USB * /\n/ * Si le bit 7 est 0, il ne réessayera pas lorsque NAK est reçu, le bit 7 est 1 et le bit 6 est 0, et réessayera à l'infini lors de la réception de NAK (vous pouvez utiliser la commande CMD_ABORT_NAK pour abandonner la nouvelle tentative), le bit 7 est 1 et 6 est 1 Réessayez au plus 3 secondes lors de la réception de NAK * /\n/ * Bit 5 ~ Bit 0 représente le nombre de tentatives après l'expiration du délai * /\n#define VAR_FILE_BIT_FLAG 0x26 / * Indicateur de bit en mode fichier hôte * /\n/ * Bit 1 et Bit 0, l'indicateur du système de fichiers FAT du disque logique, 00-FAT12, 01-FAT16, 10-FAT32, 11-illégal * /\n/ * Bit 2, indique si les données de la table FAT dans le tampon courant ont été modifiées, 0-non modifié, 1-modifié * /\n/ * Bit 3, la longueur du fichier doit être modifiée, le fichier actuel est ajouté avec les données, 0-Aucune modification n'est pas ajoutée, 1-Ajouté et doit être modifié * /\n/ * Autres bits, réservés, veuillez ne pas modifier * /\n#define VAR_DISK_STATUS 0x2B / * État du disque et du fichier en mode fichier hôte * /\n#define VAR_SD_BIT_FLAG 0x30 / * Indicateur de bit de la carte SD en mode fichier hôte * /\n/ * Bit 0, version de la carte SD, 0- prend uniquement en charge la première version SD, 1- prend en charge la deuxième version SD * /\n/ * Bit 1, reconnaissance automatique, carte 0-SD, carte 1-MMC * /\n/ * Bit 2, identification automatique, carte SD de capacité standard 0, carte SD 1 grande capacité (HC-SD) * /\n/ * Bit 4, délai d'expiration de la commande ACMD41 * /\n/ * Bit 5, délai d'expiration de la commande CMD1 * /\n/ * Bit 6, délai d'expiration de la commande CMD58 * /\n/ * Autres bits, réservés, veuillez ne pas modifier * /\n#define VAR_UDISK_TOGGLE 0x31 / * L'indicateur de synchronisation du point de terminaison BULK-IN / BULK-OUT du périphérique de stockage USB * /\n/ * Bit 7, indicateur de synchronisation de point de terminaison en bloc * /\n/ * Bit 6, indicateur de synchronisation de point de terminaison en bloc * /\n/ * Bit 5 ~ Bit 0, doit être 0 * /\n#define VAR_UDISK_LUN 0x34 / * Le numéro d'unité logique du périphérique de stockage USB * /\n/ * Bit 7 ~ Bit 4, le numéro d'unité logique actuel du périphérique de stockage USB, après que CH376 initialise le périphérique de stockage USB, la valeur par défaut est d'accéder à 0 # unité logique * /\n/ * Bit 3 ~ Bit 0, le numéro d'unité logique maximum du périphérique de stockage USB, plus 1 est égal au nombre d'unités logiques * /\n#define VAR_SEC_PER_CLUS 0x38 / * Le nombre de secteurs par cluster du disque logique * /\n#define VAR_FILE_DIR_INDEX 0x3B / * Le numéro d'index des informations de répertoire de fichiers en cours dans le secteur * /\n#define VAR_CLUS_SEC_OFS 0x3C / * Le décalage de secteur du pointeur de fichier actuel dans le cluster, 0xFF pointe vers la fin du fichier, la fin du cluster * /\n\n/ * Variable 32 bits / 4 octets * /\n#define VAR_DISK_ROOT 0x44 / * Pour les disques FAT16, c'est le nombre de secteurs occupés par le répertoire racine, et pour les disques FAT32, c'est le numéro de cluster de départ du répertoire racine (longueur totale 32 bits, octet de poids faible en premier) * /\n#define VAR_DSK_TOTAL_CLUS 0x48 / * Le nombre total de clusters du disque logique (la longueur totale est de 32 bits, octet de poids faible en premier) * /\n#define VAR_DSK_START_LBA 0x4C / * Le numéro de secteur absolu de départ du disque logique LBA (longueur totale 32 bits, octet de poids faible en premier) * /\n#define VAR_DSK_DAT_START 0x50 / * LBA de départ de la zone de données du disque logique (la longueur totale est de 32 bits, octet de poids faible en premier) * /\n#define VAR_LBA_BUFFER 0x54 / * LBA correspondant aux données du tampon de données actuel du disque (longueur totale 32 bits, octet de poids faible en premier) * /\n#define VAR_LBA_CURRENT 0x58 / * L'adresse LBA de départ du disque actuellement lu et écrit (la longueur totale est de 32 bits, octet de poids faible en premier) * /\n#define VAR_FAT_DIR_LBA 0x5C / * L'adresse LBA du secteur où se trouvent les informations du répertoire de fichiers en cours (longueur totale 32 bits, octet de poids faible en premier) * /\n#define VAR_START_CLUSTER 0x60 / * Le numéro de cluster de départ du fichier ou du répertoire (dossier) actuel (longueur totale 32 bits, octet de poids faible en premier) * /\n#define VAR_CURRENT_CLUST 0x64 / * Le numéro de cluster actuel du fichier actuel (la longueur totale est de 32 bits, octet de poids faible en premier) * /\n#define VAR_FILE_SIZE 0x68 / * La longueur du fichier actuel (la longueur totale est de 32 bits, octet de poids faible en premier) * /\n#define VAR_CURRENT_OFFSET 0x6C / * Le pointeur de fichier actuel, le décalage d'octet de la position actuelle de lecture et d'écriture (longueur totale 32 bits, octet de poids faible en premier) * / \n```\n\n\n"
},
{
"alpha_fraction": 0.5481927990913391,
"alphanum_fraction": 0.5956325531005859,
"avg_line_length": 15.195121765136719,
"blob_id": "7649d044b5ce40284e67957d02dbce8e65af5aee",
"content_id": "937caa675b9d42aa3b78e7dc5348a772fd81b8a2",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Markdown",
"length_bytes": 1328,
"license_type": "no_license",
"max_line_length": 94,
"num_lines": 82,
"path": "/doxygen/doxybook_output_vi/Files/strings_8s.md",
"repo_name": "orix-software/orix-software.github.io",
"src_encoding": "UTF-8",
"text": "---\ntitle: /data/vi/strings.s\n\n---\n\n# /data/vi/strings.s\n\n\n\n## Attributes\n\n| | Name |\n| -------------- | -------------- |\n| const char[13] | **[msg_insert](Files/strings_8s.md#variable-msg-insert)** |\n| const char[11] | **[msg_nofile](Files/strings_8s.md#variable-msg-nofile)** |\n| char[2] | **[msg_nofilename](Files/strings_8s.md#variable-msg-nofilename)** |\n| char[2] | **[msg_impossibletowrite](Files/strings_8s.md#variable-msg-impossibletowrite)** |\n| const char[8] | **[msg_written](Files/strings_8s.md#variable-msg-written)** |\n\n\n\n## Attributes Documentation\n\n### variable msg_insert\n\n```ca65\nconst char[13] msg_insert = \"-- INSERT --\";\n```\n\n\n### variable msg_nofile\n\n```ca65\nconst char[11] msg_nofile = \"[New File]\";\n```\n\n\n### variable msg_nofilename\n\n```ca65\nchar[2] msg_nofilename;\n```\n\n\n### variable msg_impossibletowrite\n\n```ca65\nchar[2] msg_impossibletowrite;\n```\n\n\n### variable msg_written\n\n```ca65\nconst char[8] msg_written = \"written\";\n```\n\n\n\n## Source code\n\n```ca65\nmsg_insert:\n .asciiz \"-- INSERT --\"\n\nmsg_nofile:\n .asciiz \"[New File]\"\n\nmsg_nofilename:\n .byte 17,\"E32: No file name\",16,0\n\nmsg_impossibletowrite:\n .byte 17,\"E99: Impossible to write\",16,0\n\nmsg_written:\n .asciiz \"written\"\n```\n\n\n-------------------------------\n\nUpdated on 2022-12-15 at 14:20:17 +0100\n"
},
{
"alpha_fraction": 0.6452513933181763,
"alphanum_fraction": 0.6927374005317688,
"avg_line_length": 16,
"blob_id": "544b907bc09ff9b7785e18bd303a92db0812f14e",
"content_id": "9da0cdfa9ac02dba0608d88a922200657453f98c",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Markdown",
"length_bytes": 358,
"license_type": "no_license",
"max_line_length": 105,
"num_lines": 20,
"path": "/docs/samples/asm_samples/index.md",
"repo_name": "orix-software/orix-software.github.io",
"src_encoding": "UTF-8",
"text": "# Samples in Assembly\r\n\r\n## Malloc\r\n\r\nAllocate 4000 bytes and displays a message if the 4000 bytes can't be allocated, else store ptr to myptr3\r\n\r\nmalloc is done with Orix-sdk\r\n\r\n``` ca65\r\n.include \"telestrat.inc\"\r\n.include \"macros/SDK.mac\"\r\n\r\nmyptr3 := userzp\r\n\r\nmalloc 4000,myptr3,str_enomem ; Index ptr\r\nrts\r\nstr_enoemem:\r\n .asciiz \"Out of memory\"\r\n\r\n```"
},
{
"alpha_fraction": 0.5020040273666382,
"alphanum_fraction": 0.5410821437835693,
"avg_line_length": 15.096774101257324,
"blob_id": "1c741f2423789d4212cfbd9062a70fda76ca3cf5",
"content_id": "cc9b2ace7d082ce5830ba8dcb821f9b2e72e34bb",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Markdown",
"length_bytes": 998,
"license_type": "no_license",
"max_line_length": 125,
"num_lines": 62,
"path": "/doxygen/doxybook_output_vi/Files/vi__ypos__screen__sub__sub_8s.md",
"repo_name": "orix-software/orix-software.github.io",
"src_encoding": "UTF-8",
"text": "---\ntitle: vi_ypos_screen_sub_sub.s\n\n---\n\n# vi_ypos_screen_sub_sub.s\n\n\n\n## Routine\n\n| | Name |\n| -------------- | -------------- |\n| | **[vi_ypos_screen_sub_sub](Files/vi__ypos__screen__sub__sub_8s.md#Routine-vi-ypos-screen-sub-sub)**<br>Decrement ypos. |\n\n\n## Routine documentation\n\n### Routine vi_ypos_screen_sub_sub\n\n```ca65\nvi_ypos_screen_sub_sub\n```\n\nDecrement ypos. \n\n**See**: [vi_struct](Files/vi_8s.md#variable-vi-struct)\n\n**Return**: A : returns 1 if y=0 or A returns 0 if y!=0 \n\n\n\n## Source code\n\n```ca65\n;; Decrement ypos\n;\n;@return A : returns 1 if y=0 or A returns 0 if y!=0\n;@see vi_struct\n;\n;;\n.proc vi_ypos_screen_sub_sub\n ; ypos_screen=ypos_screen-1\n ldy #vi_struct_data::ypos_screen\n lda (vi_struct),y\n beq @no_substract\n sec\n sbc #$01\n\n sta (vi_struct),y\n lda #$00\n rts\n@no_substract:\n lda #$01\n rts\n.endproc\n```\n\n\n-------------------------------\n\nUpdated on 2022-12-15 at 14:20:17 +0100\n"
},
{
"alpha_fraction": 0.41436463594436646,
"alphanum_fraction": 0.5138121843338013,
"avg_line_length": 9.647058486938477,
"blob_id": "d0b6101bfd0ee80827da0135aaad2e4006ef58ca",
"content_id": "f555c4ebfa7b521d664096fedec96b8a1fa42e3a",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Markdown",
"length_bytes": 181,
"license_type": "no_license",
"max_line_length": 65,
"num_lines": 17,
"path": "/doxygen/doxybook_output_vi/Structs.md",
"repo_name": "orix-software/orix-software.github.io",
"src_encoding": "UTF-8",
"text": "---\ntitle: Classes\n\n---\n\n# Classes\n\n\n\n\n* **struct [vi_struct_data](Structs/structvi__struct__data.md)** \n\n\n\n-------------------------------\n\nUpdated on 2022-12-15 at 14:20:17 +0100\n"
},
{
"alpha_fraction": 0.7246376872062683,
"alphanum_fraction": 0.7246376872062683,
"avg_line_length": 22,
"blob_id": "7d149bf38420ba73ecb4cc0672f42d202af9e90a",
"content_id": "3a55f942627727901e6481d70c845acb84344ee6",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Markdown",
"length_bytes": 69,
"license_type": "no_license",
"max_line_length": 57,
"num_lines": 3,
"path": "/docs/commands/vidplay.md",
"repo_name": "orix-software/orix-software.github.io",
"src_encoding": "UTF-8",
"text": "# Vidplay\n\nPlay .vhi video (some are located in /usr/share/vidplay/)\n"
},
{
"alpha_fraction": 0.6133311986923218,
"alphanum_fraction": 0.6486492156982422,
"avg_line_length": 25.167821884155273,
"blob_id": "47db8dadf59bc9e9cd9a33fa6768c127cccc0fa5",
"content_id": "288ac9e664e05413e47710950e0269ced726f1b9",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Markdown",
"length_bytes": 50024,
"license_type": "no_license",
"max_line_length": 273,
"num_lines": 1877,
"path": "/pandoc/manual.md",
"repo_name": "orix-software/orix-software.github.io",
"src_encoding": "UTF-8",
"text": "---\ntitle: Orix/Twilighte Manual\nauthor: v2022.4.1\ndate: rev 09/11/2022\n---\n\n{width=\"17cm\" height=\"12.749cm\"}\n\n\n\\newpage\n\n\n\n\n## EXTRA DOCUMENTATION\n\n===================\n\nRom loader and firmware twilighte board menu\n--------------------------------------------\n\nhttps://github.com/orix-software/systemd/blob/master/docs/pdf/systemd.pdf\n\n[]{#anchor}INTRODUCTION\n=======================\n\n[]{#anchor-1}What is new (v2022.4) ?\n-------------------------------------\n\nhttps://orix-software.github.io/update/2022_4/\n\n[]{#anchor-4}General informations\n---------------------------------\n\nThis documentation must be use when you have installed orix version\n**2022.3** (see at the top of the banner on the oric at boot).\n\nOn [http://orix.oric.org](http://orix.oric.org/), you will have some\nyoutube videos links showiint how to use some functionnality.\n\nThe board has a firmware version. This firmware can be upgarded see\n« Hardware and firmware upgrade » section.\n\nThe board can be upgarded too but you have to send it to upgrade the\nboard see « Hardware and firmware upgrade » section » too.\n\nThe card has a 512KB of eeprom, and 512KB of RAM. This RAM is saved with\na battery. For instance, only bank 4, 3, 2 and 1 can be switched to see\nothers sets. It's a software limit. In the future, you will be able to\ndisplays all bank and starts any binary from theses banks. If you wants\nto change the set, you can use twil command. This command can switch to\neeprom bank or ram bank and can switch to any set.\n\nSome extra devices (TOM2, logitech joypad) are explained a bit in this\nmanual, but it's not adverts, we don't ear anything:) It explains some\nways to use joystick, others hardware exists in the same way)\n\n[]{#anchor-5}Features\n---------------------\n\n- .tap file fast loading (with multitap files)\n\n<!-- -->\n\n- Joysticks support for a lot of games on atmos mode\n- the hobbit, defence-force (and others games) works without any\n patch for loading\n\n- in system : kernel update, roms and ram update (with\n [orixcfg](http://orix.oric.org/command-orixcfg/) binary)\n- 2 DB9 Joysticks (atari)\n- 512KB of EEPROM (banking mode)\n- 512KB of RAM (banking mode)\n- read/write from sdcard (MAX 64GB) or usb drive (mass storage)\n- drag and drop from the PC to the oric : It will be available on the\n oric (with WIFI connexion) : It requires extra hardware with a\n raspberry pi zero\n- fast loading : 46KB per second. A game require less than one second\n to load and start\n- cumulus compatible with the help of an other amplibus board (not\n provided)\n\n\\section{}\n\n[]{#anchor-6}GETTING STARTED\n============================\n\n[]{#anchor-7}Content\n--------------------\n\n{width=\"6.08cm\" height=\"8.107cm\"}\n{width=\"10.509cm\" height=\"7.881cm\"}\n=================================================================================================================================================================================================================================================================================\n\n[]{#anchor-8}Physicals ports\n----------------------------\n\n{width=\"8.326cm\"\nheight=\"4.598cm\"}{width=\"9.181cm\"\nheight=\"4.262cm\"}\n\n[]{#anchor-9}Hardware limits\n----------------------------\n\nThe usb controler manage FAT32 only. Sdcard and usb key must be\nformatted with FAT32 filesystem. If you want to use pi zero gadget\ntrick, you need to do a mkfs to FAT32 file system.\n\nAll tests had been done with samsung evo sdcard and sandisk usb key. A\nlot of sdcard works, and we did not see incompatibility with sdcard.\n\nSdcard controler and usb key controler can work with 32GB storage Max.\nBut it can handle 64 GB sdcard (tested). It can handle larger sdcard/usb\nkey reader, but only 32 and 64 GB devices was used.\n\n[]{#anchor-10}Software limits\n-----------------------------\n\nThe sdcard/usb controler can handle long filename, but Orix handles 8+3\nfilename only.\n\n[]{#anchor-11}Information about joysticks part\n----------------------------------------------\n\nThe left port has only 1 button. The right port has 3 buttons. The\njoystick pinout is atari pinout. You can use standard DB9 joystick. You\ncan also plug « TOM2 » hardware (not provided), it can connect a usb\nmouse or usb joypad (wireless) to theses ports. For example, logitech\njoypad F710 (wireless) works with TOM2.\n\nPlease note that TOM2 can only handle 2 buttons. It means that the third\nbutton can't work with TOM2 connected.\n\n{width=\"17cm\" height=\"3.902cm\"}\n--------------------------------------------------------------------------------------------\n\n{#anchor-12}First boot : Initialize the storage\n-------------------------------------------------------------------------------------------------------------------------------------------------\n\nWhen the card is sent, kernel is built with a default storage. In order\nto know which device is the default one, you can type « mount ». You can\nonly use one device at the same time, but you can swap easily theses\ndevices from command line.\n\nIf you see « sdcard », then sdcard will be read by default. You can\nchange it, with a command : « twil -u », it will switch to usbdrive. If\nyou want to have usb drive by default, you can program kernel with the\ntool « orixcfg ». See Orixcfg section.\n\nNow, if you know which device you will use by default, you can install\nall software on it.\n\nPlug the device on your PC (sdcard or usb key). If you have a pi zero w,\nyou can do this with drag and drop solution from the PC.\n\nDownload sdcard.tgz from this :\n<http://repo.orix.oric.org/dists/official/tgz/6502/>\n\nIt contains all software for orix there is others which are not\navailable in this archive.\n\nNow, use 7zip on your PC (or tar/gzip under linux), and unzip all files\nfrom this sdcard.tgz. Put all theses new files in your device root\nfolder.\n\nNow, you can insert the device (sdcard or usbkey -- or pi zero) in the\ntwilighte board and play.\n\n{width=\"10.659cm\" height=\"7.712cm\"}[]\n\n[]{#anchor-13}Upgrade from v2022.4 to v2022.4.1\n---------------------------------------------\n\nIf your orix version is below v2022.3 version, please go to annexes part at the\nend of this document, before you try to upgrade to v2022.4.1\n\n- Download\n <http://repo.orix.oric.org/dists/official/tgz/6502/sdcard.tgz>\n- untar/gunzip sdcard.tgz (use 7zip under windows) on your device usb\n or sdcard : It could require some time to copy because there is a\n lot of small files (tap, hlp etc)\n- you can start orix on real machine, and type :\n\n /\\#cd usr\\\n /usr\\#cd share\\\n /*usr/share\\#cd carts\\\n /usr/share/carts\\#cd 2022.4*\n\n If you want to usr usb drive for default device :\n\n */usr/share/carts/2022.4\\#orixcfg -r -s 4 kernelus.r64*\n\n If you want to use sdcard for default device :\n\n /usr/share/carts/2022.4\\#orixcfg -r -s 4 kernelsd.r64\n\n- press 'y', and **wait until Orix reboots **\n\n (Don't switch off the Oric at this step)\n\n\n[]{#anchor-14}Optionnal step for upgrade\n----------------------------------------\n\nNow bank displays all banks from l to 64. It means that you should have\nsome strange bank signature for eeprom. Now an empty set is provided in\n*/usr/share/carts/2021.4 *folder. With Orixcfg you can initialize your\nset with this cart. Don't use « -s 4 » flag for orixcfg when you want to\nload emptyset.\n\n[]{#anchor-15}First step : type a command\n-----------------------------------------\n\nYou can access to available command from many ways :\n\n- From /bin folders, there is binary available on current device, 'ls'\n will show you available commands\n- From banks : type « help -b5 » you will see available commands\n\n\n\n\\newpage\n\\center\n\\Huge Commands\n\n\\flushleft\n\n\\normalsize\n\n\\newpage\n\n\n[]{#anchor-26}Basic10 & Basic11\n=====================\n\nLaunch\n------\n\nBasic10 starts Oric-1 rom with sdcard/usb key support\n\nBasic11 starts Atmos rom with sdcard/usb key support\n\nYou can type basic11 or press FUNCT+B to start\n\n[]{#anchor-27}Load a personal .tap file\n---------------------------------------\n\nWhen you starts basic11 commands, the default path is\n*« /home/basic11/ ». Each action on the basic11 mode will be done in\nthis folder (cload/csave). If you cload a tape file, it must be in\n« /home/basic11 » folder.*\n\n{width=\"7.544cm\" height=\"5.447cm\"}\n\nYou have downloaded a .tap file, and want to use it.\nThen, you can create a folder /*home*/basic11/\n\nUnder Orix\n\n/\\#mkdir home\\\n/\\#cd home\\\n/home\\#mkdir basic11\\\n/home\\#cd basic11\n\nPut you file in this folder from your PC, and start basic11 (you don't\nneed to be in the «/home/basic11 » folder to start basic11 with no\nparameter. By default, basic11 starts in « /home/basic11/ »\n\n[]{#anchor-28}Oric.org tape file\n--------------------------------\n\nWhen you downloaded sdcard.tgz and unzip it into sdcard or usbkey\ndevice, there is many tape file included in this archive. You don't need\nto move these type file, if you know the key, you can starts it from\ncommands line. In this case, it will load the correct basic1.1 rom to\nstart the tape file (see below), and the correct joystick configuration\nif it's correct.\n\n[]{#anchor-29}Oric.org tape file update\n---------------------------------------\n\nEach week a new software.tgz is generated. You can download it from\n« repo » and unzip it on the device. It will generate last tape file and\nlast joysticks configuration.\n\n[]{#anchor-30}Search a tape file from command line\n--------------------------------------------------\n\n{width=\"7.304cm\" height=\"5.398cm\"}[]{#anchor-31}\n\nBasic11 has also many.tap files inserted in sdcard.tgz\n\nTry to find the software with option -l\n\n/\\# basic11 -l\n\nIf you find your software, you can do perform **ctrl+c.**\n\nYou can type space to do a pause.\n\nOn that case, you can launch the tape file like :\n\n/\\# basic11 «KEYDISPLAYED\n\nWhen KEYDISPLAYED is the key displayed in key column. Please note that\nthe key must be in **UPPERCASE**\n\n[]{#anchor-32}Load a tap file from command line\n-----------------------------------------------\n\nNote that MYFILE must be in **UPPERCASE**\n\n/\\# basic11 «MYFILE\n\nIf MYFILE is in the oric.org database, it will launch the software with\nthe filename MYFILE.\n\nIf basic11 command does not find MYFILE in the oric.org database, it\nwill try to load it from /home/basic11/ folder.\n\n[]{#anchor-33}Save your program \n--------------------------------\n\n\\\nIf you start « basic11 » with no options, basic rom will starts and each\ncsave (or cload) actions will store files in « /*home/basic11 » folder*\n\n[]{#anchor-34}Start basic11 menu\n--------------------------------\n\n{width=\"7.384cm\"height=\"5.341cm\"}\n\nIf you type « basic11 -g » on command line or FUNCT+G,\nyou will have a menu with all software which have a download link on\noric.org (only atmos version and when a tape file is available).\n\n/\\#basic11 -g\n\nYou can use left and right letters to change to a new letter. If the\nletter is empty, it means that there is no available tap file for this\nletter.\n\nYou can use up and down link to navigate into software. If you press\nenter, the software will starts.\n\nNote that not all games are working yet. Some times, chars are\ncorrupted. If the joysticks does not works, there is two case :\n\n- the game does not call rom routine to manage keyboard\n- keyboard mapping is not done yet\n\nYou can use arrows to navigate into the menu :\n\n- up and down to select the software\n- right and left to switch to the menu letters\n\nSome letters are empty. It means that there is no software with tape\nfile available on oric.org for this letter\n\n[]{#anchor-35}Quit basic11\n--------------------------\n\nIf you want to quit basic11 from interpreter command line, you can type\n« QUIT ». This will force to reboot to Orix (you can also use reset\nbutton)\n\n[]{#anchor-36}How the .tap file starts\n--------------------------------------\n\nIf you only type « basic11 », this will start bank 6 (normal basic rom).\nThe default folder in that case is «/*home/basic11 »*\n\nIf you type « basic11 » with a tape file as an argument, there is 2\ncases\n\n1. The tape file (key) is already known in oric.org website, then\n basic11 try to find it in its databank file (/var/cache/basic11/\n folder). If the key is found, it will start the tape file located in\n «/usr/share/basic11/\\... »\n2. If the key is unknown, it will try to find it in «/home/basic11 »\n\nIf the tap file is in the oric.org db file, basic11 will load the\nsoftware configuration from the db software file (as joystick\nconfiguration, and the id of the rom). Basic11 load the right rom into\nram bank, override the default basic11 path to the tape file folder\n(« *usr/share/basic11/\\[firstletter software\\]. *\n\nIt means that if you load this kind of software and you can quit the\nsoftware, each file action in basic11 rom, will be performed in\n« usr/share/basic11/\\[firstletter software\\]. »\n\n[]{#anchor-37}Not working tapes (for instance)\n----------------------------------------------\n\n- All Oric-1 games can be started with FUNCT+L in ROM menu : start\n oric-1 (depending of your device), and put .tap files in\n /home/basic10\n- Software which does not work (25), but the number can be reduced in\n future release.\n\n ----------------------------- --------------------- ------------------------------------------------------------\n cobra Cobra pinball Damsel in distress\n Rush hour 4K\n Le diamant de l'ile maudite Durendal HU\\*BERT\n Hunchback Schtroumpfs Stanley (ROM 0,1 tested)\n Them Titan Visif\n Xenon III Dig Dog Elektro Storm\n Kilburn Encounter Le tresor du pirate L'aigle d'or (ROM 0,1 tested)\n Compatible (micropuce) Volcanic demo Clavidact\n DAO Cobra Soft CW-Morse The Hellion\n MARC Caspak Kryllis : when we lost one life, the game does not restart\n ----------------------------- --------------------- ------------------------------------------------------------\n\n[]{#anchor-38}Tape with altered charset\n---------------------------------------\n\n ------------ ------------ -------------------------\n Fire flash Scuba Dive 3D fongus (i,f letters)\n\n ------------ ------------ -------------------------\n\n[]{#anchor-39}Joysticks issues\n------------------------------\n\nWe did keyboard/joystick mapping for a lot of games, but we did not set\nthe keyboard mapping for all software. If you want to help us, contact\nus.\n\nSome game does not work because they handle their own keyboard routine.\nIt could be handle with hardware tricks but, it's not done.\n\nSome others games uses special keys (SHIFT, CTRL) for direction or the\nfirst button. Theses cases are not handle yet : but it could in the\nfuture.\n\n\n\\newpage\n\\newpage\n\n# asm2k2 \n\nlaunch asm2k2 demo\n\n/#asm2k2\n\n\\newpage\n\n# bank\n\n## Introduction\n\nBank command is command line tool to see which bank are loaded into EEPROM bank and RAM bank. Each bank has a\n\"signature\". Bank allows to see theses banks.\nBank can also starts a ROM with his id. In that case, you don’t need to have a rom \"orix friendly\" and you can start it\nfrom command line. In the current bank version, there is restriction to launch a command.\n\n## SYNOPSYS\n\n### List all bank (when ROM signature is valid)\n\n/#bank\nBank 1 to 32 is eeprom bank and bank 33 to 64 are ram bank\n\n### Displays all signature even when ROM is not valid\n\n/#bank\n\n### List all commands from a bank\n\n/#help -b5\n\n### Start a specific bank\n\n/#bank 1\n\nIf you need to load a rom into a bank, you need to have a look to orixcfg binar\n\n## DESCRIPTION\n\nThis command displays bank when the command is called without parameter. WIth a parameter, you can switch to a the id of the bank passed to the argument :\n\nbank : displays all the bank (if a signature is found)\nbank 4 : switch to bank 4\nbank -a : displauys all bank (empty bank too)\n\n## SOURCE\n\nhttps://github.com/orix-software/shell/blob/master/src/commands/bank.asm\n\\newpage\n\n# Barboric/Barbitoric demo\n\nLaunch barbitoric demo from orix (sdcard/usbdrive)\n\n/# barboric\n\\newpage\n\n# Blakes7\n\nStart Blakes7\n\n/#blakes7\n\nIf you want to remove your saved game :\n\n/#cd /usr/share/blakes7\n\n/usr/share/blakes7# rm 136.o\n\n## Check the version\n\n/#blakes7 -v\n\\newpage\n\n# Command: bootfd\n\n## Starts microdisc bootsector\n\nbootfd is a tool to boot the boot sector when a drive is connected.\nInsert a disk and type :\n\n/#bootfd\n\nThe binary is located in \"/bin\" folder. It will load microdisc rom and\nwill start it. If it displays \"microdis.rom not found\",\nthen place \"microdis.rom\" file in the right folder.\n\nIf you have firmware 1, you will be able to load « blake's 7 ». If you\nhave firmware 2, all sedoric .dsk should start.\n\nFor instance, only Space99 does not work, it stops after intro.\n\n## SYNOPSYS\n+ bootfd\n\n## EXAMPLES\n+ bootfd\n\nBoots only microdisc boot sector with the help of microdisc.rom. In this version, we don't verify if a dsk is inserted or not. If there is no disk, the oric will crash\n\n## SOURCE\nSrc code : https://github.com/orix-software/bootfd.git (Assembly and C)\n\\newpage\n\n# Born1983\n\nLaunch born 1983 demo\n\n/#born1983\n\n\\newpage\n\n# cat\n\n## Introduction\n\n Display a file to the screen\n\n## SYNOPSYS\n\n+ cat FILENAME\n\n## NOTES\n\nDisplays content of the FILENAME\n\n## SOURCE\n\nhttps://github.com/orix-software/shell/blob/master/src/commands/cat.asm\n\\newpage\n\n# cd\n\n## Introduction\n\nChange directory\n\n## SYNOPSYS\n\n+ cd DIRECTORY\n\n## EXAMPLES\n\n+ cd /usr/bin\n+ cd ..\n+ cd /\n\n## SOURCE\n\nhttps://github.com/orix-software/shell/blob/master/src/commands/cd.asm\n\\newpage\n\n# Command: cksum\n\n### checksum utility\n\n## SYNOPSYS\n+ cksum -h|-v\n+ cksum file [...]\n+ cksum @batchfile\n\n## EXAMPLES\n+ cksum /bin/cksum\n\n## DESCRIPTION\nchecksum and count the bytes in a file\n\n## OPTIONS\n* -h\n show this help message and exit\n* -v\n display program version and exit\n\n## SOURCE\nhttps://github.com/orix-software/cksum\n\n{width=\"7.685cm\" height=\"5.595cm\"}\n\\newpage\n\n# clear\n\n## Introduction\n\nClear the screen\n\n## SYNOPSYS\n\n+ clear\n\n## SOURCE\n\nhttps://github.com/orix-software/shell/blob/master/src/commands/clear.asm\n\\newpage\n\n# cp\n\n## Introduction\n\n Copy a file to another file\n\n## SYNOPSYS\n\n+ #cp from to\n\n## DESCRIPTION\n\nCopy a file to another. Only 40KB can be copied. It's a software limit. cp tries to allocate 40KB, if there is not enough memory it displays Out of memory.\n\n## EXAMPLES\n\n+ cp from.txt to.txt\n\n## SOURCE\n\nhttps://github.com/orix-software/shell/blob/master/src/commands/cp.asm\n\\newpage\n\n# df \n\nDisplays storage device informations\n\n{width=\"17cm\" height=\"12.293cm\"}\n\\newpage\n\n# Command: dsk-util\n\n### Disk image utility\n\n## SYNOPSYS\n+ dsk-util -f|-s file.dsk\n+ dsk-util -h\n\n## EXAMPLES\n+ dsk-util -f ftdos.dsk\n+ dsk-util -s sedoric3.dsk\n\n## DESCRIPTION\n**dsk-util** display the directory of a disk image file.\n\n## OPTIONS\n* -h\n show this help message and exit\n* -f\n FTDOS disk image\n* -s\n Sedoric disk image\n\n## SOURCE\nhttps://github.com/orix-software/dsk-util\n\n\\newpage\n\n# echo\n\n## Introduction\n\nDisplay a message\n\n## SYNOPSYS\n\n+ #echo hello\n\n## DESCRIPTION\n\nDisplay a message\n\n## SOURCE\n\nhttps://github.com/orix-software/shell/blob/master/src/commands/echo.asm\n\\newpage\n\n# env\n\n## Introduction\n\nDisplays env variables\n\n## SYNOPSYS\n\n+ #env\n\n## DESCRIPTION\n\nDisplays env variables. It can't be used in command line or shell script for instance\n\n## SOURCE\n\nhttps://github.com/orix-software/shell/blob/master/src/commands/env.asm\n\\newpage\n\n# Command: forth\n\nLaunch forth for Orix\n\n## SYNOPSYS\n+ forth\n\n## DESCRIPTION\n\n**Forth** is a language based on teleforth (telestrat). You can type **VLIST** to list all words. If you need to switch to hires, you can type **GRAFX** and **HIRES**. If you need to switch to text: **GRAFX** and **TEXT**\n\n\nYou can use forth language. It works the same ways than Tele forth (it's\nteleforth but it write file on sdcard/usbdrive).\n\nYou can download Teleforth langage in order to see how to program in\nforth.\n\nWhen you type « forth » forth starts with the current folder directory.\n\nIf you were in /, forth will load and save file in this folder.\n\nIn others cases, you can create a forth folder in home and goes into it\nin forth for example :\n\n* mkdir home\n* #cd home\n* #mkdir forth\n* forth\n\nif you type « cd forth» in forth environnement, all files actions will\nbe perform in « /*home/*forth »\n\n## SOURCE\n\nhttps://github.com/assinie/Tele-Forth\n\\newpage\n\n# Ftdos\r\n\r\n## Introduction\r\n\r\nftdos command starts ftdos .dsk file without jasmin floppy disc controler. It's a beta version.\r\n\r\n## Usage\r\n\r\nTo start a .dsk file\r\n\r\n/#ftdos mydsk\r\n\r\nTo display the ftdos command version :\r\n\r\n/#ftdos -v\r\n\r\nftdos disks must be present in /home/basic11/dsk/\r\n\r\n## Informations\r\n\r\n* Write on .dsk file is not supported\r\n* refers to ftdos manual for usages\r\n* you need to switch off/on the oric when you reset into ftdos mode, because there is a bug when we reset during ftdos mode. Ftdos does not load and reboot into Orix\r\n\\newpage\n\n# Command: grep\n\n### grep utility\n\n## SYNOPSYS\n+ grep [-ncisw] string filename\n+ grep -h\n\n## DESCRIPTION\n**grep** display all lines of a text file that contain a string.\n\n## OPTIONS\n* -h^GDisplay command syntax\n* -n^GShow line numbers\n* -c^GCount only the matching lines\n* -i^GIgnore case\n* -w^GString can use wildcards *, ?, ^ and $\n* -s^GSilent mode\n\n## EXAMPLES\n+ grep error menu.sub\n+ grep -n \"level 1\" menu.sub\n+ grep -i ERROR menu.sub\n+ grep -ni 'level 2' menu.sub\n+ grep -w '\\*lev?? 2\\*'menu.sub\n+ grep -w '^if' menu.sub\n+ grep -w 'error$' menu.sub\n+ grep -w 'if*level ??' menu.sub\n\n## SOURCE\nhttps://github.com/orix-software/grep\n\n\n{width=\"7.685cm\" height=\"5.595cm\"}\n\\newpage\n\n# help\n\n## Introduction\n\nDisplay commands\n\n## SYNOPSYS\n\n+ help\n\n## DESCRIPTION\n\n- No parameter : Displays all internals shell command\n- bX parameter : Displays all internals commands in bank X\n\n## SOURCE\n\nhttps://github.com/orix-software/shell/blob/master/src/commands/help.asm\n\\newpage\n\n# Command: hexdump\n\n### hexdump utility\n\n## SYNOPSYS\n+ hexdump -v\n+ hexdump -h\n+ hexdump -b bankid [,offset]\n+ hexdump file\n\n## EXAMPLES\n+ hexdump /bin/hexdump\n+ hexdump -b 7\n+ hexdump -b 33,16128\n\n## DESCRIPTION\nDisplay file or bank contents in hexadecimal.\nYou can use [SPACE] to pause the display ou [CTRL]+C to abort.\n\n## SOURCE\nhttps://github.com/orix-software/hexdump\n\n\n{width=\"7.685cm\" height=\"5.595cm\"}\n\\newpage\n\n# ioports\n\n## Introduction\n\nDisplay I/O Ports of the board\n\n## SYNOPSYS\n\n+ #ioports\n\n## DESCRIPTION\n\nDisplays I/O ports.\n\n## SOURCE\n\nhttps://github.com/orix-software/shell/blob/master/src/commands/ioports.asm\n\\newpage\n\n# ls\n\n## Display catalog\n\nList all the file in the current folder. Token are supported (*,?) ex : « ls ?e.tap » will list all files with a ‘e ’ in the\nsecond letter\n\n## SYNOPSYS\n\nlist all the files in the current folder\n\n+ ls\n\nList all *.tap files\n\n+ ls *.tap\n\nList size and datetime of the file\n+ ls -l\n\n## DESCRIPTION\n\nDirectories are in ^FBLUE^G color. It manages '-l' and Pattern works in different ways : ls *.tap\n\n## SOURCE\n\nhttps://github.com/orix-software/shell/blob/master/src/commands/ls.asm\n\\newpage\n\n# lscpu\n\n## Introduction\n\nDisplays cpu info. It detects : 6502, 65c02 and 65816\n\n## SYNOPSYS\n\n+ #lscpu\n\n## DESCRIPTION\n\nDisplays cpu info\n\n## EXAMPLES\n\n+ lscpu\n\n## SOURCE\n\nhttps://github.com/orix-software/shell/blob/master/src/commands/lscpu.asm\n\\newpage\n\n# lsmem\n\n## Introduction\n\n Displays malloc table\n\n## SYNOPSYS\n\n+ #lsmem\n\n## DESCRIPTION\n\nDisplays malloc table. Free chunks and busy chuncks are displayed with ranges.\n\n## EXAMPLES\n\n+ lsmem\n\n## SOURCE\n\nhttps://github.com/orix-software/shell/blob/master/src/commands/lsmem.asm\n\\newpage\n\n# man\n\n## Introduction\n\ndisplay manual pages\n\n## SYNOPSYS\n\n+ man lsmem\n\n## DESCRIPTION\n\nDisplays manual. All .hlp files are located in /usr/share/man/. It manages multiples text screen (if .hlp is bigger than a screen when space is pressed, it switch to next page).\n\n## Keys\n\n* SPACE to switch to next page\n\n* ESC to exit\n\n## EXAMPLES\n\n+ man ls\n\n## SOURCE\n\nhttps://github.com/orix-software/shell/blob/master/src/commands/man.asm\n\\newpage\n\n# mkdir\n\n## Introduction\n\nCreate a folder\n\n## SYNOPSYS\n\n+ /#mkdir PATH\n\n## DESCRIPTION\n\nCreate a folder. -p (recursive mode) option is not available\n\n## EXAMPLES\n\n+ mkdir /opt\n\n## SOURCE\n\nhttps://github.com/orix-software/shell/blob/master/src/commands/mkdir.asm\n\\newpage\n\n# mount\n\n## Introduction\n\nDisplays mounts\n\n## SYNOPSYS\n\n+ #mount\n\n## DESCRIPTION\n\nDisplays mount (usb key or sdcard)\n\n## SOURCE\n\nhttps://github.com/orix-software/shell/blob/master/src/commands/mount.asm\n\n{width=\"7.685cm\" height=\"5.595cm\"}\n\\newpage\n\n# otimer\n\n## Introduction\n\nDisplay timer since the Oric is power on\n\n## SYNOPSYS\n\n+ otimer\n\n## NOTES\n\nEach time Oric reboots, this timer is reset\n\n## SOURCE\n\nhttps://github.com/orix-software/shell/blob/master/src/commands/otimer.asm\n\\newpage\n\n# pwd\n\n## Introduction\n\n Display the current path\n\n## SYNOPSYS\n\n+ pwd\n\n## SOURCE\n\nhttps://github.com/orix-software/shell/blob/master/src/commands/pwd.asm\n\\newpage\n\n# ps\n\n## Introduction\n\nDisplays process list\n\n## SYNOPSYS\n\n+ #ps\n\n## DESCRIPTION\n\nDisplays process list\n\n## EXAMPLES\n\n+ ps\n\n## SOURCE\n\nhttps://github.com/orix-software/shell/blob/master/src/commands/ps.asm\n\\newpage\n\n# Quintessential demo\n\nStart quintessential demo \n\n/#quintes\n\\newpage\n\n# reboot\n\n## Introduction\n\n reboot machine\n\n## SYNOPSYS\n\n+ reboot\n\n## DESCRIPTION\n\nReboot the machine: it call NMI VECTORS and flush page 2 and page 0\n\n## SOURCE\n\nhttps://github.com/orix-software/shell/blob/master/src/commands/reboot.asm\n\\newpage\n\n# rm\n\n## Introduction\n\nremove file\n\n## SYNOPSYS\n\n+ /# rm /myfile\n\n## DESCRIPTION\n\nrm file or directory. For instance, rm can only remove file in root folder, relative arg does not work yet\n\n## EXAMPLES\n\n+ rm /myffile\n\n## SOURCE\n\nhttps://github.com/orix-software/shell/blob/master/src/commands/rm.asm\n\\newpage\n\n# sh\n\n## Introduction\n\nWhen kernel has finished to initialize at boot time, sh command is started in interactive mode\n\n## Interactive mode\n\n*Esc-b* : move cursor at the beginning of the previous word\n\n*Esc-f* : move cursor at the end of the next word\n\n*Esc-l* : switch current word into lowercase, and put cursor at the end of the word\n\n*Esc-u* : switch current word into uppercase, and put cursor at the end of the word\n\n*Ctrl-a* : move cursor at the beginning of the line\n\n*Ctrl-e* : move cursor at the end of the line\n\n*Ctrl-c* : cancel current line\n\n*Ctrl-k* : delete the end of the line\n\n*Ctrl-l* : clear screen, and displays the line, the cursors keeps his position\n\n*Ctrl-u* : clear the line and put cursor at the beginning of the line\n\n*Ctrl-t* : swap char under the cursor with the previous one, and shift the cursor to the right\n\n*Ctrl-o* : Switch into replacement or insertion mode\n\n## Shortcuts for tools\n\n*Funct+A* : Start Systemd rom\n\n*Funct+B* : Start Basic11\n\n*Funct+G* : Start basic11 gui\n\n*Funct+L* : Start Loader\n\n*Funct+T* : Start Twilighte setup\n\n## Available commands\n\nThe command line is limited in characters (37). If you reach this limit, you won’t be able to type the complete command line Known bugs\n\n* « ./ » can not be used to launch a binar\n\\newpage\n\n# twil\n\n## Introduction\n\nTwil command can displays the current firmware of twilighte card, and\ncan swap root folder to usbkey or sdcard.\n\n## SYNOPSYS\n\n+ /#twil -f : displays Twilighte board firmware\n+ /#twil -u : switch default device : usbdrive\n+ /#twil -s : swap default device to : sdcard\n\n## DESCRIPTION\n\n## SOURCE\n\nhttps://github.com/orix-software/shell/blob/master/src/commands/twil.asm\n\\newpage\r\n\r\n[]{#anchor-65}MONITOR\r\n=====================\r\n\r\n[]{#anchor-66}Usage\r\n-------------------\r\n\r\nMonitor is a rom which can displays a monitor. It's teleass without\r\nassembler part.\r\n\r\n\\newpage\r\n\r\n[]{#anchor-67}ORIXCFG\r\n=====================\r\n\r\n[]{#anchor-68}Update kernel, shell : orixcfg\r\n--------------------------------------------\r\n\r\nWhen a new released is done, you can update the eeprom with the new\r\nkernel and new roms.\r\n\r\nIf you want to know if you need to update the kernel, you can compare\r\nyour current version, and the last release version. You can go to\r\n[http://orix.oric.org](http://orix.oric.org/) You need to have a look to\r\nthis release below :\r\n\r\n{width=\"17cm\" height=\"9.888cm\"}\r\n\r\nIf on your Oric screen, it's not the same value, you can update it. The\r\nsequence of the Orix release is Year.X. There is 4 releases a year, and\r\neach one must be done until you reach the final one, in order to avoid\r\nsome case. If your version is v2020.3 and the last available version is\r\nv2021.4. You need to update to v2020.4, then v2021.1, v2021.2, v2021.3,\r\nv2021.4, v2022.1, v2022.4\r\n\r\nIt's maybe possible to jump to version v2022.3, but it's at your own\r\nrisk because there is a « chance » that some kernel calls changed, and\r\norixcfg could do wrong step.\r\n\r\n[]{#anchor-69} Update kernel, shell\r\n----------------------------------\r\n\r\nWhen you need to update kernel, you can update it with orixcfg. You just\r\nneed to define set 4 on the command line. This step is very\r\n**dangerous** if you don't load the right file. There is no verification\r\nand any file on the command line will be load in the kernel set.\r\nUsually, kernel set file is named kernxxxx.r64.\r\n\r\nIf you did something wrong on this step, you won't be able to start orix\r\nagain. It means that you will need to remove eeprom from the card and\r\nprogram it with a eeprom programmer\r\n\r\nThis next command will load kernel.r64 to kernel set. Please wait until\r\nOrix reboots. If you have a kernel 2020.3 and you need to load a kernel\r\n2021,1, you will need to load previous kernel set before the update of\r\n2021.1.\r\n\r\n.r64 extension means that it's a 64KB set. It's usually used to define\r\nthat the file contains 4 roms of 16KB.\r\n\r\nPlease note that we provide 2 kernels version. One named\r\n« kernelsd.r64 » which means that the default device will be sdcard, and\r\nthe other one « kernelus.r64 » which means that default device will be\r\n« usb » (usbkey). If you load the wrong kernel at this step, you can use\r\ntwil command to switch to the right device, and you can start again\r\nkernel update with the right file (kernelsd.r64 or kernelus.r64\r\ndepending of your configuration).\r\n\r\n/\\#orixcfg -r -s 4 kernelsd.r64\r\n\r\n[]{#anchor-70}Load a ROM into a ram slot\r\n----------------------------------------\r\n\r\nSpace between values and switches are not optionnal, orixcfg needs\r\ntheses spaces\r\n\r\n/\\#orixcfg -b XX -l myrom.rom\r\n\r\nThis command will load myrom.rom (in the current path), in RAM bank XX\r\n\r\nOlder usage as : orixcfg -r -s X -b Y myrom.rom is no longer included in\r\norixcfg since orixcfg v2021.3\r\n\r\n[]{#anchor-71}Load a set of ROM into ROM slot\r\n---------------------------------------------\r\n\r\n/\\#orixcfg -r -s 0 myrom.r64\r\n\r\n[]{#anchor-72}This command will load myrom.r64 (in the current path), in\r\nset 0. For instance, you can not load one bank, you need to load 64KB\r\nset.\r\n\r\n[]{#anchor-73}Clear bank ram or initialize it\r\n---------------------------------------------\r\n\r\nRam bank are not initialized when the board is tested. If you have\r\ngarbage on screen when you uses bank (after you used twil -w). You have\r\nto clear all ram bank (ram bank are battery saved).\r\n\r\nIf you want to clear bank 4 of the set 0, you can do this command. You\r\nneed to do this command for each bank of each set. For instance, there\r\nis no switch to clear all the ram with one command.\r\n\r\n/\\#orixcfg -w -s 0 -b 4 -c\r\n\r\n[]{#anchor-74}Flush all ram bank\r\n--------------------------------\r\n\r\n/\\#orixcfg -w -f\r\n\r\n\\newpage\r\n\r\n\r\n[]{#anchor-75}OSID MUSIC\r\n========================\r\n\r\n[]{#anchor-76}How to play osid music ?\r\n--------------------------------------\r\n\r\nYou need to check if you have twilighte board firmware 2 :\r\n\r\n/\\#twil -f\r\n\r\nIf it returns 2 or greater, you can download some osid files :\r\n\r\n<https://www.oric.org/software/osid_music-2534.html>\r\n\r\nPlace all .tap files in /home/basic11\r\n\r\nAnd launch :\r\n\r\n/\\#basic11\r\n\r\nLoad patch to avoid to load sedoric routines (in basic command line)\r\n\r\nCLOAD«OSID\r\n\r\nAnd then load the osid file you want :\r\n\r\nCLOAD«OSNEVER\r\n\r\n\\newpage\r\n\r\n[]{#anchor-77}PWD\r\n=================\r\n\r\n[]{#anchor-78}Introduction\r\n--------------------------\r\n\r\nDisplays current PWD\r\n\r\n\\newpage\r\n\r\n[]{#anchor-79}SHA1\r\n==================\r\n\r\n[]{#anchor-80}Usage\r\n-------------------\r\n\r\nSha1 is a tool to displays a string into sha1 encoding\r\n\r\n\\newpage\r\n\r\n[]{#anchor-81}STORMLORD\r\n=======================\r\n\r\n[]{#anchor-82}Introduction\r\n--------------------------\r\n\r\nStormlord is Stormlord game port to Orix. You can use joysticks to plays\r\nto this game.\r\n\r\n[]{#anchor-83}Only one joystick port is working on this version\r\n===============================================================\r\n\r\n\\newpage\r\n\r\n[]{#anchor-84}SYSTEMD\r\n=====================\r\n\r\nSystemd is a rom which can load another ROM in ram slot. When you type\r\nsystemd, it will reads * /etc/systemd/banks and will load rom declared\r\nin this file sequencialy. It means that the first rom will be load in\r\nbank id 33, the second one in bank id 34.*\r\n\r\nThis roms can be used in a eeprom bank, you can load it with orixcfg\r\n\r\nYou can set roms in *« /etc/systemd/banks » as : *\r\n\r\n*\\[MYROMNAME1\\]*\r\n\r\n*path=/usr/share/rom/my.rom*\r\n\r\n*\\[MYROMNAME2\\]*\r\n\r\npath=/usr/share/rom/my2.rom\r\n\\newpage\n\n# Command: submit\n\n### submit utility\n\n## SYNOPSYS\n+ submit file [arg...]\n\n## EXAMPLES\n+ submit help.sub\n\n## DESCRIPTION\n**submit** is a command language interpreter that executes commands read from a file.\n\n## OPTIONS\n* no options\n\n## SOURCE\nhttps://github.com/orix-software/submit\n\n\\newpage\n\n# Command: untar\n\n### untar utility\n\n## SYNOPSYS\n+ untar -t[v]f tarfile\n+ untar -x[v]f tarfile\n\n## EXAMPLES\n+ untar -tf /home/test.tar\n\n## DESCRIPTION\n**untar** list and extract files in a TAR archive\n\n## OPTIONS\n* -h ^Bshow this help message and exit\n* -t ^Blist files\n* -x ^Bextract files\n* -v ^Bverbose mode\n\n## SOURCE\nhttps://github.com/orix-software/untar\n\n\\newpage\r\n\r\n[]{#anchor-92}CUMULUS COMPATIBILITY\r\n===================================\r\n\r\n[]{#anchor-93}How to connect a cumulus\r\n---------------------------------------\r\n\r\nOn the current firmware (Firmware 1) : and current hardware (board\r\nversion v0.65), we have to do some hacks to have cumulus working. But,\r\nyou will only launch two diskfile. Anyway, you can access to drive with\r\nno restriction, except bank switching. See « Hardware and firmware\r\nupgrade », if you want to avoid theses modifications\r\n\r\nIn firmware 1, and with board modification, there is only two working\r\ndisk : Blake's 7 and VIP2015.\r\n\r\nPlease, test your cumulus on the oric connected to the board. If it does\r\nnot work on your Oric, it won't work too with card plugged !\r\n\r\nIf you want to use cumulus, you have to :\r\n\r\n[]{#anchor-94}1) cut 4 pins on daughter card (ROMDIS, MAP, A14, A15)\r\n\r\n{width=\"11.137cm\"\r\nheight=\"14.185cm\"}\r\n\r\n[]{#anchor-95}2) remove eprom from cumulus\r\n\r\n{width=\"15.722cm\"\r\nheight=\"20.657cm\"}\r\n\r\n[]{#anchor-96}3) add another amplibus before twilighte daughter board\r\n\r\n{width=\"17cm\"\r\nheight=\"15.452cm\"}\r\n\r\n[]{#anchor-97}4) Connect all cards to the oric\r\n\r\n{width=\"16.346cm\" height=\"18.554cm\"}\r\n-------------------------------------------------------------------------------------------------\r\n\r\n[]{#anchor-98}Twilighte board firmware compatibility\r\n----------------------------------------------------\r\n\r\nOnly firmware 2 is available to use boot sector to start Microdisc disk.\r\n\r\n[]{#anchor-99}Hardware and firmware upgrade\r\n===========================================\r\n\r\n[]{#anchor-100}Firmware upgrade\r\n-------------------------------\r\n\r\nThere is only one firmware available. The version 2 is in development.\r\n\r\n[]{#anchor-101}First method : For those who have programmers and some hardware tool\r\n-----------------------------------------------------------------------------------\r\n\r\nBut, when it will be released, you could update the firmware with :\r\n\r\n1\\) a plcc extractor\r\n\r\n2\\) altera software (Quartys v13)\r\n\r\n3\\) a Jtag programmer\r\n\r\n4\\) solder the jtag connector\r\n\r\n5\\) get .pof file\r\n\r\n[]{#anchor-102}Second method : send the card to the author of the card (me)\r\n---------------------------------------------------------------------------\r\n\r\nIn that case, fimware upgrade will be done, and you could ask to upgrade\r\nto new board version to add (sometimes new functionnality)\r\n\r\n\\newpage\r\n\r\n[]{#anchor-103}TROUBLE SHOOTING\r\n===============================\r\n\r\n[]{#anchor-104}'ls' displays garbage on screen\r\n----------------------------------------------\r\n\r\nInsert your sdcard or your usb drive into your PC. You should have\r\nstrange « file » on the sdcard : remove theses files.\r\n\r\n[]{#anchor-105}Impossible to mount a usb key or a sdcard\r\n--------------------------------------------------------\r\n\r\nThe sdcard must be in FAT32 format\r\n\r\n[]{#anchor-106}Screen garbage when i use bank\r\n---------------------------------------------\r\n\r\nIf you have screen garbage when you switched to ram bank before with\r\n« twil -w »\r\n\r\nIt means that ram bank are not initialized. See orixcfg section to fix\r\nit\r\n\r\n[]{#anchor-107}Pi zero always reboots\r\n-------------------------------------\r\n\r\nCheck your PSU. If you have a 2A PSU and you have a pi zero, cumulus and\r\nTOM2 connected, you should reach the PSU limits. If you can't get\r\nanother PSU, you can disable bluetooth of you pi zero, or you can also\r\ndownclock from 1Ghz to 700mhz for example.\r\n\r\nYou can also use a 3A PSU. In the future, it will be possible to add\r\nanother PSU on the board.\r\n\r\n[]{#anchor-108}When i start Orix, filesytem is unstable or displays usb controler not found\r\n-------------------------------------------------------------------------------------------\r\n\r\nIf you have pi zero connected, it could answer to the controler partial\r\ninformation or could hang the usb controler because controler does not\r\nunderstand usb data when it sends information to usb port.\r\n\r\nYou have to wait a bit. If you want to verify this, you can switch off\r\nthe oric (and then the pi zero), switch on the oric with Orix, and type\r\n'debug', if you have another value than \\#AA for ch376 check exists,\r\nit's the problem, if you do 'debug' another value will be displayed but\r\nnot \\#AA. In fact, when pi zero boot, usb controler is unstable.\r\n\r\n[]{#anchor-109}« I/O Error » is displayed\r\n-----------------------------------------\r\n\r\nYou can reach this message in some case :\r\n\r\n1. device (sdcard or usbdrive is missing)\r\n2. after a launch of « df » command : There is an issue, the controler\r\n is in incorrect state after this command. It's a bug\r\n\r\nYou can usually fix it by launching « ls » twice. Because « ls » handles\r\na reset order to the usb controler when it does not produce the correct\r\nanswer. It means that if USB controler is not working well, ls displays\r\nthe error message and will produce a reset command to the controler. If\r\nyou launch ls again, it will work.\r\n\r\n[]{#anchor-110}The oric does not work : black screen\r\n----------------------------------------------------\r\n\r\nIf you have a pi zero connected on usb port, unplug it. Boot the oric,\r\nand now insert pi zero into usb port\r\n\r\n[]{#anchor-111}Kernel panic\r\n---------------------------\r\n\r\nWhen kernel can't solve a « free memory kernel call» in a binary, it\r\ncould produce a kernel panic. In that case, you need to do a reset.\r\nThere is a bug in kernel 2021.1 which could produce this error. It will\r\nbe corrected as soon as possible.\r\n\r\n[]{#anchor-112}A folder is displayed on my PC but not under my Oric\r\n-------------------------------------------------------------------\r\n\r\nSometimes sdcard or usbkey has bad format for the usb controler and it\r\ncan reads some content. Format the usb key or sdcard and install again\r\nall files. Or try another usb key/sdcard\r\n\r\n[]{#anchor-113}I have strange behavior when un do csave or cload on basic ROM : It's always the same file event i cload another content\r\n---------------------------------------------------------------------------------------------------------------------------------------\r\n\r\nSometimes sdcard or usbkey has bad format for the usb controler and it\r\ncan reads some content. Format the usb key or sdcard and install again\r\nall files. Or try another usb key/sdcard\r\n\r\n\r\n[]{#anchor-200}Garbage on basic screen\r\n---------------------------------------------------------------------------------------------------------------------------------------\r\n\r\nWhen you start altered charset on screen when you start basic11 command (or funct+T), download again basic.tgz here : http://repo.orix.oric.org/dists/official/tgz/6502/\r\n\r\nuntar and unzip it on your device, it should fix this : the charset.chs file had been altered\r\n\r\n\\newpage\r\n\r\n[]{#anchor-114}Q&A\r\n==================\r\n\r\n[]{#anchor-115}I want to change the current directory\r\n-----------------------------------------------------\r\n\r\nSee « cd » command\r\n\r\n[]{#anchor-116}I want to see which bank are loaded into ROM and RAM\r\n-------------------------------------------------------------------\r\n\r\nSee «bank» section\r\n\r\n[]{#anchor-117}I want to read a .dsk file\r\n-----------------------------------------\r\n\r\nYou can only extract files from a .dsk file (see « dsk-util »)\r\n\r\nIf you have a cumulus board, you can use « bootfd » and connect your\r\ncumulus on expansion board « see how to connect a cumulus section »\r\n\r\n[]{#anchor-118}I can't type anything in basic rom (« basic11 » command)\r\n-----------------------------------------------------------------------\r\n\r\nThere is a firmware bug on some board which generate a false state for\r\nthe third button of a joystick. The easier step to avoid this, is to\r\nconnect a joystick to the left port on the board.\r\n\r\nThe issue can be fixed by upgrading firmware board (it needs to open the\r\nbox and program the firmware with Jtag port)\r\n\r\n\\newpage\r\n\r\n[]{#anchor-119}ANNEXES\r\n======================\r\n\r\n[]{#anchor-120}Firmware version\r\n-------------------------------\r\n\r\n --------- ------------------------------------------------------------- ------------\r\n Version Features Known bugs\r\n 1 RAM/ROM switch, ROM programmation, joysticks, usb controler N/A\r\n 2 Start all sedoric disks from cumulus N/A\r\n --------- ------------------------------------------------------------- ------------\r\n\r\n[]{#anchor-121}Upgrade from older version\r\n-----------------------------------------\r\n\r\nUpgrade from v2022.1 to v2022.3\r\n---------------------------------------------\r\n\r\nIf your orix version is below v2022.1 version, please go to annexes part at the\r\nend of this document, before you try to upgrade to v2022.3\r\n\r\n- Download\r\n <http://repo.orix.oric.org/dists/official/tgz/6502/sdcard.tgz>\r\n- untar/gunzip sdcard.tgz (use 7zip under windows) on your device usb\r\n or sdcard : It could require some time to copy because there is a\r\n lot of small files (tap, hlp etc)\r\n- you can start orix on real machine, and type :\r\n\r\n /\\#cd usr\\\r\n /usr\\#cd share\\\r\n /*usr/share\\#cd carts\\\r\n /usr/share/carts\\#cd 2022.3*\r\n\r\n If you want to usr usb drive for default device :\r\n\r\n */usr/share/carts/2022.3\\#orixcfg -r -s 4 kernelus.r64*\r\n\r\n If you want to use sdcard for default device :\r\n\r\n /usr/share/carts/2022.3\\#orixcfg -r -s 4 kernelsd.r64\r\n\r\n- press 'y', and **wait until Orix reboots **\r\n\r\n (Don't switch off the Oric at this step)\r\n\r\n\r\nUpgrade from v2021.4 to v2022.1\r\n---------------------------------------------\r\n\r\nIf your card is below v2021.4 version, please go to annexes part at the\r\nend of this document, before you try to upgrade to v2022.1\r\n\r\n- Download\r\n <http://repo.orix.oric.org/dists/official/tgz/6502/sdcard.tgz>\r\n- untar/gunzip sdcard.tgz (use 7zip under windows) on your device usb\r\n or sdcard : It could require some time to copy because there is a\r\n lot of small files (tap, hlp etc)\r\n- you can start orix on real machine, and type :\r\n\r\n /\\#cd usr\\\r\n /usr\\#cd share\\\r\n /*usr/share\\#cd carts\\\r\n /usr/share/carts\\#cd 2022.1*\r\n\r\n If you want to usr usb drive for default device :\r\n\r\n */usr/share/carts/2022.1\\#orixcfg -r -s 4 kernelus.r64*\r\n\r\n If you want to use sdcard for default device :\r\n\r\n /usr/share/carts/2022.1\\#orixcfg -r -s 4 kernelsd.r64\r\n\r\n- press 'y', and **wait until Orix reboots **\r\n\r\n (Don't switch off the Oric at this step)\r\n\r\nUpgrade from v2021.3 to v2021.4\r\n-------------------------------\r\n\r\nIf your card is below v2021.3 version, please go to annexes part at the\r\nend of this document, before you try to upgrade to v2021.4\r\n\r\n- Download\r\n <http://repo.orix.oric.org/dists/official/tgz/6502/sdcard.tgz>\r\n- untar/gunzip sdcard.tgz (use 7zip under windows) on your device usb\r\n or sdcard : It could require some time to copy because there is a\r\n lot of small files (tap, hlp etc)\r\n- you can start orix on real machine, and type :\r\n\r\n /\\#cd usr\\\r\n /usr\\#cd share\\\r\n /*usr/share\\#cd carts\\\r\n /usr/share/carts\\#cd 2021.4*\r\n\r\n If you want to usr usb drive for default device :\r\n\r\n */usr/share/carts/2021.4\\#orixcfg -r -s 4 kernelus.r64*\r\n\r\n If you want to use sdcard for default device :\r\n\r\n /usr/share/carts/2021.4\\#orixcfg -r -s 4 kernelsd.r64\r\n\r\n- press 'y', and **wait until Orix reboots **\r\n\r\n (Don't switch off the Oric at this step)\r\n\r\n[]{#anchor-122}Upgrade from v2021.2 to v2021.3\r\n----------------------------------------------\r\n\r\nYou need to unzip/untar orixcfg new version here :\r\n<http://repo.orix.oric.org/dists/2021.3/tgz/6502/orixcfg.tgz>\r\n\r\n- Download\r\n <http://repo.orix.oric.org/dists/official/tgz/6502/sdcard.tgz> or\r\n <http://repo.orix.oric.org/dists/2021.3/tgz/6502/cardridge.tgz>\r\n- untar/gunzip sdcard.tgz (use 7zip under windows) on your device usb\r\n or sdcard : It could require some time to copy because there is a\r\n lot of small files (tap, hlp etc)\r\n- you can start orix on real machine, and type :\r\n\r\n /\\#cd usr\\\r\n /usr\\#cd share\\\r\n /*usr/share\\#cd carts\\\r\n /usr/share/carts\\#cd 2021.3*\r\n\r\n If you want to usr usb drive for default device :\r\n\r\n */usr/share/carts/2021.3\\#orixcfg -r -s 4 kernelus.r64*\r\n\r\n If you want to use sdcard for default device :\r\n\r\n /usr/share/carts/2021.3\\#orixcfg -r -s 4 kernelsd.r64\r\n\r\n- press 'y', and **wait until Orix reboots **\r\n\r\n (Don't switch off the Oric at this step)\r\n\r\n\r\n[]{#anchor-123}Upgrade from v2021.1 to v2021.2\r\n\r\nIf your card is below v2021.1 version, please go to annexes part at the\r\nend of this document, before you try to upgrade to v2021.2\r\n\r\n- Download\r\n <http://repo.orix.oric.org/dists/official/tgz/6502/sdcard.tgz>\r\n- untar/gunzip sdcard.tgz (use 7zip under windows) on your device usb\r\n or sdcard : It could require some time to copy because there is a\r\n lot of small files (tap, hlp etc)\r\n- you can start orix on real machine, and type :\r\n\r\n /\\#cd usr\\\r\n /usr\\#cd share\\\r\n /*usr/share\\#cd carts\\\r\n /usr/share/carts\\#cd 2021.2*\r\n\r\n If you want to usr usb drive for default device :\r\n\r\n */usr/share/carts/2021.2\\#orixcfg -r -s 4 kernelus.r64*\r\n\r\n If you want to use sdcard for default device :\r\n\r\n /usr/share/carts/2021.2\\#orixcfg -r -s 4 kernelsd.r64\r\n\r\n- press 'y', and **wait until Orix reboots **\r\n\r\n (Don't switch off the Oric at this step)\r\n\r\n[]{#anchor-125}From 2020.4 to 2021.1\r\n\r\nDownload : <http://repo.orix.oric.org/dists/2021.1/tgz/6502/carts.zip>\r\n\r\nUnzip it on your device (sdcard/usbkey)\r\n\r\n- you can start orix on real machine, and type :\r\n\r\n /\\#cd usr\\\r\n /usr\\#cd share\\\r\n /*usr/share\\#cd carts\\\r\n /usr/share/carts\\#cd 2021.1*\r\n\r\n If you want to usr usb drive for default device :\r\n\r\n */usr/share/carts/2021.1\\#orixcfg -r -s 4 kernelus.r64*\r\n\r\n If you want to use sdcard for default device :\r\n\r\n /usr/share/carts/2021.1\\#orixcfg -r -s 4 kernelsd.r64\r\n\r\n- press 'y', and **wait until Orix reboots **\r\n\r\n (Don't switch off the Oric at this step)\r\n\r\n\r\n\r\n[]{#anchor-200}Optionnal step for upgrade\r\n-----------------------------------------\r\n\r\nNow bank displays all banks from l to 64. It means that you should have\r\nsome strange bank signature for eeprom. Now an empty set is provided in\r\n*/usr/share/carts/2021.3* folder. With Orixcfg you can initialize your\r\nset with this cart. Don't use « -s 4 » flag for orixcfg when you want to\r\nload emptyset."
},
{
"alpha_fraction": 0.5627118349075317,
"alphanum_fraction": 0.5917675495147705,
"avg_line_length": 20.288660049438477,
"blob_id": "ab3162e663926e1d6a0b6637561c619af76b9570",
"content_id": "35324b3fd572ee85deb61a649d89ddccf9bc7fde",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Markdown",
"length_bytes": 2069,
"license_type": "no_license",
"max_line_length": 183,
"num_lines": 97,
"path": "/doxygen/doxybook_output/Files/vi__check__inserted__char__overflow__the__max__column_8s.md",
"repo_name": "orix-software/orix-software.github.io",
"src_encoding": "UTF-8",
"text": "---\ntitle: /mnt/c/Users/plifp/OneDrive/oric/projets/orix-software/vi/src/functions/subfunc/vi/vi_check_inserted_char_overflow_the_max_column.s\n\n---\n\n# /mnt/c/Users/plifp/OneDrive/oric/projets/orix-software/vi/src/functions/subfunc/vi/vi_check_inserted_char_overflow_the_max_column.s\n\n\n\n## Functions\n\n| | Name |\n| -------------- | -------------- |\n| | **[vi_check_inserted_char_overflow_the_max_column](Files/vi__check__inserted__char__overflow__the__max__column_8s.md#function-vi-check-inserted-char-overflow-the-max-column)**() |\n\n\n## Functions Documentation\n\n### function vi_check_inserted_char_overflow_the_max_column\n\n```cpp\nvi_check_inserted_char_overflow_the_max_column()\n```\n\n\n\n\n## Source code\n\n```cpp\n\n.proc vi_check_inserted_char_overflow_the_max_column\n ; Pas d'arg en entrée\n ; Cette routine renvoie dans\n ; A=0 si on a un texte qui ne dépasse pas 40 colonnes\n ; A=1 si on a une ligne texte qui dépasse la ligne de 40 colonnes et X contient le nb de lignes sur l'éditeur de cette ligne texte\n ; A=2 si on a une ligne de plus de 256 chars : overflow\n ; A=3 si cela la ligne fait exactement 1 ligne, il faudra donc scroller\n\n lda #$00\n sta vi_tmp1\n\n lda vi_ptr_file_used\n sta vi_ptr1\n\n lda vi_ptr_file_used+1\n sta vi_ptr1+1\n\n ldy #vi_struct_data::xpos_screen\n lda (vi_struct),y\n tax\n dex\n stx vi_tmp3\n\n ldy #$00\n@L1:\n lda (vi_ptr1),y\n cmp #CR\n beq @exit\n inx\n cpx #VI_EDITOR_MAX_COLUMN+1\n bne @continue\n\n inc vi_tmp1\n ldx #$00\n\n@continue:\n inc vi_tmp3\n iny\n bne @L1\n lda #$02 ; Overflow\n rts\n\n@exit:\n lda vi_tmp3\n cmp #VI_EDITOR_MAX_COLUMN+1\n beq @exactly_a_new_line\n\n ldx vi_tmp1\n beq @no_line_editor_overflow\n lda #$01\n rts\n\n@no_line_editor_overflow:\n lda #$00\n rts\n\n@exactly_a_new_line:\n lda #$03\n rts\n.endproc\n```\n\n\n-------------------------------\n\nUpdated on 2022-12-15 at 11:48:27 +0100\n"
},
{
"alpha_fraction": 0.5078684091567993,
"alphanum_fraction": 0.54935622215271,
"avg_line_length": 15.642857551574707,
"blob_id": "93df795eb5b8bad3f622a0b3b8d5237dd7df9c84",
"content_id": "9052c3686bcdab155bfe35d0016462bcc7b1a649",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Markdown",
"length_bytes": 699,
"license_type": "no_license",
"max_line_length": 105,
"num_lines": 42,
"path": "/doxygen/doxybook_output/Files/vi__set__xpos__0_8s.md",
"repo_name": "orix-software/orix-software.github.io",
"src_encoding": "UTF-8",
"text": "---\ntitle: /mnt/c/Users/plifp/OneDrive/oric/projets/orix-software/vi/src/functions/subfunc/vi/vi_set_xpos_0.s\n\n---\n\n# /mnt/c/Users/plifp/OneDrive/oric/projets/orix-software/vi/src/functions/subfunc/vi/vi_set_xpos_0.s\n\n\n\n## Functions\n\n| | Name |\n| -------------- | -------------- |\n| | **[vi_set_xpos_0](Files/vi__set__xpos__0_8s.md#function-vi-set-xpos-0)**() |\n\n\n## Functions Documentation\n\n### function vi_set_xpos_0\n\n```cpp\nvi_set_xpos_0()\n```\n\n\n\n\n## Source code\n\n```cpp\n.proc vi_set_xpos_0\n ldy #vi_struct_data::xpos_screen\n lda #$00\n sta (vi_struct),y\n rts\n.endproc\n```\n\n\n-------------------------------\n\nUpdated on 2022-12-15 at 11:48:27 +0100\n"
},
{
"alpha_fraction": 0.49632352590560913,
"alphanum_fraction": 0.5358455777168274,
"avg_line_length": 16.26984214782715,
"blob_id": "91e18b0acded9eaf427d057ec8a0f91ad9e73ab4",
"content_id": "dd1a7a5613b6ef416e9348f3f799f1aeb01fc25e",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Markdown",
"length_bytes": 1088,
"license_type": "no_license",
"max_line_length": 131,
"num_lines": 63,
"path": "/doxygen/doxybook_output_vi/Files/vi__copy__arg1__to__name__file__open_8s.md",
"repo_name": "orix-software/orix-software.github.io",
"src_encoding": "UTF-8",
"text": "---\ntitle: vi_copy_arg1_to_name_file_open.s\n\n---\n\n# vi_copy_arg1_to_name_file_open.s\n\n\n\n## Routine\n\n| | Name |\n| -------------- | -------------- |\n| | **[vi_copy_arg1_to_name_file_open](Files/vi__copy__arg1__to__name__file__open_8s.md#Routine-vi-copy-arg1-to-name-file-open)** |\n\n\n## Routine documentation\n\n### Routine vi_copy_arg1_to_name_file_open\n\n```ca65\nvi_copy_arg1_to_name_file_open\n```\n\n\n\n\n## Source code\n\n```ca65\n.proc vi_copy_arg1_to_name_file_open\n ldy #$00\n sty vi_tmp1\n\n ldy #vi_struct_data::name_file_open\n sty vi_tmp2\n\n@loop_copy_filename:\n ldy vi_tmp1\n lda (vi_arg1),y\n beq @exit_copy_filename\n iny\n sty vi_tmp1\n ldy vi_tmp2\n sta (vi_struct),y\n iny\n sty vi_tmp2\n cpy #VI_MAX_LENGTH_FILENAME+vi_struct_data::name_file_open\n bne @loop_copy_filename\n\n lda #$00 ; EOS if overflow\n@exit_copy_filename:\n\n ldy vi_tmp2\n sta (vi_struct),y\n rts\n.endproc\n```\n\n\n-------------------------------\n\nUpdated on 2022-12-15 at 14:20:17 +0100\n"
},
{
"alpha_fraction": 0.6161616444587708,
"alphanum_fraction": 0.6401515007019043,
"avg_line_length": 16.80337142944336,
"blob_id": "df8298efc1be55ad8decdb84ee96cbe077359479",
"content_id": "e14e58d9559ad57a40178eb6089844b899bb4898",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Markdown",
"length_bytes": 3168,
"license_type": "no_license",
"max_line_length": 115,
"num_lines": 178,
"path": "/doxygen/doxybook_output_vi/Classes/structvi__struct__data.md",
"repo_name": "orix-software/orix-software.github.io",
"src_encoding": "UTF-8",
"text": "---\ntitle: vi_struct_data\n\n---\n\n# vi_struct_data\n\n\n\n\n\n## Public Attributes\n\n| | Name |\n| -------------- | -------------- |\n| char | **[xpos_screen](Classes/structvi__struct__data.md#variable-xpos-screen)** |\n| char | **[ypos_screen](Classes/structvi__struct__data.md#variable-ypos-screen)** |\n| char[2] | **[pos_file_addr](Classes/structvi__struct__data.md#variable-pos-file-addr)** |\n| char[4] | **[pos_file](Classes/structvi__struct__data.md#variable-pos-file)** |\n| char | **[posx_command_line](Classes/structvi__struct__data.md#variable-posx-command-line)** |\n| char[VI_MAX_LENGTH_FILENAME] | **[name_file_open](Classes/structvi__struct__data.md#variable-name-file-open)** |\n| | **[int](Classes/structvi__struct__data.md#variable-int)** |\n| | **[ptr_file_begin](Classes/structvi__struct__data.md#variable-ptr-file-begin)** |\n| char[4] | **[length_file](Classes/structvi__struct__data.md#variable-length-file)** |\n| char[2] | **[pos_ptr_video_address](Classes/structvi__struct__data.md#variable-pos-ptr-video-address)** |\n| char[4] | **[file_number_of_line](Classes/structvi__struct__data.md#variable-file-number-of-line)** |\n| char[1] | **[xpos_command_line](Classes/structvi__struct__data.md#variable-xpos-command-line)** |\n| char[39] | **[command_line_buffer](Classes/structvi__struct__data.md#variable-command-line-buffer)** |\n| char[2] | **[ptr_last_char_file](Classes/structvi__struct__data.md#variable-ptr-last-char-file)** |\n| char[2] | **[line_id](Classes/structvi__struct__data.md#variable-line-id)** |\n| char[1] | **[xpos_text](Classes/structvi__struct__data.md#variable-xpos-text)** |\n| char[1] | **[ypos_text](Classes/structvi__struct__data.md#variable-ypos-text)** |\n\n## Public Attributes Documentation\n\n### variable xpos_screen\n\n```ca65\nchar xpos_screen;\n```\n\n\nposition x of the cursor on the screen \n\n\n### variable ypos_screen\n\n```ca65\nchar ypos_screen;\n```\n\n\nposition y of the cursor on the screen \n\n\n### variable pos_file_addr\n\n```ca65\nchar[2] pos_file_addr;\n```\n\n\nposition on the file (address) \n\n\n### variable pos_file\n\n```ca65\nchar[4] pos_file;\n```\n\n\nposition in the file \n\n\n### variable posx_command_line\n\n```ca65\nchar posx_command_line;\n```\n\n\nposition on command line \n\n\n### variable name_file_open\n\n```ca65\nchar[VI_MAX_LENGTH_FILENAME] name_file_open;\n```\n\n\n### variable int\n\n```ca65\nint;\n```\n\n\nadress of the beginning of the file \n\n\n### variable ptr_file_begin\n\n```ca65\nptr_file_begin;\n```\n\n\n### variable length_file\n\n```ca65\nchar[4] length_file;\n```\n\n\nLength of the file \n\n\n### variable pos_ptr_video_address\n\n```ca65\nchar[2] pos_ptr_video_address;\n```\n\n\n### variable file_number_of_line\n\n```ca65\nchar[4] file_number_of_line;\n```\n\n\n### variable xpos_command_line\n\n```ca65\nchar[1] xpos_command_line;\n```\n\n\n### variable command_line_buffer\n\n```ca65\nchar[39] command_line_buffer;\n```\n\n\n### variable ptr_last_char_file\n\n```ca65\nchar[2] ptr_last_char_file;\n```\n\n\n### variable line_id\n\n```ca65\nchar[2] line_id;\n```\n\n\n### variable xpos_text\n\n```ca65\nchar[1] xpos_text;\n```\n\n\n### variable ypos_text\n\n```ca65\nchar[1] ypos_text;\n```\n\n\n-------------------------------\n\nUpdated on 2022-12-15 at 12:29:28 +0100"
},
{
"alpha_fraction": 0.6036866307258606,
"alphanum_fraction": 0.6059907674789429,
"avg_line_length": 16.31999969482422,
"blob_id": "7d3be40582509333d2bb85479a895e8db9eb409d",
"content_id": "f838f7fe5d0696fe789a494f733d0eb29decf6db",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Markdown",
"length_bytes": 434,
"license_type": "no_license",
"max_line_length": 56,
"num_lines": 25,
"path": "/docs/commands/dsk-util.md",
"repo_name": "orix-software/orix-software.github.io",
"src_encoding": "UTF-8",
"text": "# Command: dsk-util\n\n### Disk image utility\n\n## SYNOPSYS\n+ dsk-util -f|-s file.dsk\n+ dsk-util -h\n\n## EXAMPLES\n+ dsk-util -f ftdos.dsk\n+ dsk-util -s sedoric3.dsk\n\n## DESCRIPTION\n**dsk-util** display the directory of a disk image file.\n\n## OPTIONS\n* -h\n show this help message and exit\n* -f\n FTDOS disk image\n* -s\n Sedoric disk image\n\n## SOURCE\nhttps://github.com/orix-software/dsk-util\n\n"
},
{
"alpha_fraction": 0.677172064781189,
"alphanum_fraction": 0.6916524767875671,
"avg_line_length": 22.959182739257812,
"blob_id": "df711419d75725b5eae26415f1266f48b1bc4665",
"content_id": "0dd80f07cda700121fcdd959faa1fe5bc09e2d3c",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Markdown",
"length_bytes": 1174,
"license_type": "no_license",
"max_line_length": 148,
"num_lines": 49,
"path": "/docs/kernel/primitives/xgetargv.md",
"repo_name": "orix-software/orix-software.github.io",
"src_encoding": "UTF-8",
"text": "# XGETARGV\n\n## Description\n\nGet argv. X register contains the number of the arg search\n\nKernel handle a struct with XMAINARGS. This struct is handled by Kernel, and no action are required in external code, but here is how struct works :\n\n``` ca65\n.struct XMAINARGS_STRUCT\nargv_ptr .res KERNEL_MAX_ARGS_COMMAND_LINE\nargv_value_ptr .res KERNEL_LENGTH_MAX_CMDLINE+KERNEL_MAX_ARGS_COMMAND_LINE ; add 0 to string\n.endstruct\n```\n\nargv_ptr contains an offset of each param. It means that we can't have a length of args greater than 256.\n\nXMAINARGS and XGETARGV does not handle \"\" yet and \"\\ \"\n\n## Input\n\nA & Y the ptr of struct from XMAINARGS\n\nX the number of arg to get, first param is 0 not 1 !\n\n## Output\n\nA & Y contains argv ptr from xmainargs struct. It returns a copy of the command line with args parsed\n\n## Example\n\n```ca65\n .include \"telestrat.inc\"\n\n XMAINARGS = $2C\n XGETARGV = $2E\n BRK_TELEMON XMAINARGS\n sta debug_mainargs_ptr\n sty debug_mainargs_ptr+1\n\n ldx #$02 ; get arg 2 ; Get the third param\n lda debug_mainargs_ptr\n ldy debug_mainargs_ptr+1\n\n BRK_KERNEL XGETARGV\n\n ; A & Y contains ptr\n rts\n```\n"
},
{
"alpha_fraction": 0.5446686148643494,
"alphanum_fraction": 0.5533141493797302,
"avg_line_length": 11.34615421295166,
"blob_id": "5d9d7ea6db215846d1688518c1340c61e9d34366",
"content_id": "2db2cc53a0ce06257bbb0ca9ddfa2108f41205f7",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Markdown",
"length_bytes": 347,
"license_type": "no_license",
"max_line_length": 68,
"num_lines": 26,
"path": "/docs/developer_manual/orixsdk_macros/getcwd.md",
"repo_name": "orix-software/orix-software.github.io",
"src_encoding": "UTF-8",
"text": "# Getcwd\r\n\r\n## Description\r\n\r\nGet cwd\r\n\r\n## Usage\r\n\r\ngetcwd ptr\r\n\r\nnote:\r\nptr may be: ptr, address\r\n\r\n## Example\r\n\r\n```ca65\r\n .include \"telestrat.inc\"\r\n .include \"../orix-sdk/macros/SDK_dir.mac\"\r\n\r\n pwd := userzp ; 2 byte\r\n\r\n getcwd pwd\r\n rts\r\n```\r\n\r\nCall [XGETCWD](../../../kernel/primitives/XGETCWD/) kernel function.\r\n"
},
{
"alpha_fraction": 0.478658527135849,
"alphanum_fraction": 0.5137194991111755,
"avg_line_length": 13.260869979858398,
"blob_id": "18e7108c1a7926dacbae5c40bf89b826b9186d67",
"content_id": "ccb7587a41e50ec19b23bd37cdb2fb752e16b673",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Markdown",
"length_bytes": 656,
"license_type": "no_license",
"max_line_length": 99,
"num_lines": 46,
"path": "/doxygen/doxybook_output_vi/Files/vi__set__ptr__last__char_8s.md",
"repo_name": "orix-software/orix-software.github.io",
"src_encoding": "UTF-8",
"text": "---\ntitle: vi_set_ptr_last_char.s\n\n---\n\n# vi_set_ptr_last_char.s\n\n\n\n## Routine\n\n| | Name |\n| -------------- | -------------- |\n| | **[vi_set_ptr_last_char](Files/vi__set__ptr__last__char_8s.md#Routine-vi-set-ptr-last-char)** |\n\n\n## Routine documentation\n\n### Routine vi_set_ptr_last_char\n\n```ca65\nvi_set_ptr_last_char\n```\n\n\n\n\n## Source code\n\n```ca65\n.proc vi_set_ptr_last_char\n ; A and X contains the last char ptr\n\n ldy #vi_struct_data::ptr_last_char_file\n sta (vi_struct),y\n txa\n iny\n sta (vi_struct),y\n rts\n.endproc\n```\n\n\n-------------------------------\n\nUpdated on 2022-12-15 at 14:20:17 +0100\n"
},
{
"alpha_fraction": 0.7389557957649231,
"alphanum_fraction": 0.7389557957649231,
"avg_line_length": 13.647058486938477,
"blob_id": "a19180fe83a5c8418a0725c4ef2d4cac44fead73",
"content_id": "cc97d83baa48cc65564e62385df0e072bac46d78",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Markdown",
"length_bytes": 249,
"license_type": "no_license",
"max_line_length": 73,
"num_lines": 17,
"path": "/docs/commands/touch.md",
"repo_name": "orix-software/orix-software.github.io",
"src_encoding": "UTF-8",
"text": "# touch\n\n## Introduction\n\nCreate an empty file\n\n## SYNOPSYS\n\n+ #touch myfile\n\n## DESCRIPTION\n\nCreate an empty file. Time does not affect the timstamp of the file.\n\n## SOURCE\n\nhttps://github.com/orix-software/shell/blob/master/src/commands/touch.asm\n"
},
{
"alpha_fraction": 0.7166666388511658,
"alphanum_fraction": 0.7233333587646484,
"avg_line_length": 25.18181800842285,
"blob_id": "9e9817f97b2da7cc806e7502ea77385eba931749",
"content_id": "f0c1cad53720d5c661af9d77100d8ef98b617e25",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Markdown",
"length_bytes": 609,
"license_type": "no_license",
"max_line_length": 87,
"num_lines": 22,
"path": "/docs/network.md",
"repo_name": "orix-software/orix-software.github.io",
"src_encoding": "UTF-8",
"text": "# Carte réseau ethernet pour Oric\r\n\r\n* vraie carte réseau ethernet avec gestion de la stack tcp/ip hardware\r\n* vitesse 40KB/sec du réseau à la ram, et 23KB du réseau à la sdcard\r\n\r\nEn cours de dev, mais voici les choses en cours et en partie fonctionnel pour certains:\r\n\r\n* Gestion du dhcp\r\n* check du cable\r\n* ifconfig\r\n* resvctl pour voir le dns server ou le setter\r\n* netcnf pour une vue de tous les paramètres\r\n* curl\r\n* dig\r\n* serveur telnetd\r\n* netstat\r\n* nc : permet l'envoi de log sur du loghost\r\n* ip\r\n* mini browser\r\n* ping\r\n\r\nApi réseau possible à l'avenir pour ses propres programmes.\r\n\r\n"
},
{
"alpha_fraction": 0.7041420340538025,
"alphanum_fraction": 0.7100591659545898,
"avg_line_length": 27.823530197143555,
"blob_id": "d03560d8411340212ad14ffcb00ddb741de1009f",
"content_id": "b74fdac83a3e6b26bb92a8a0252b8236b5fc0b85",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Markdown",
"length_bytes": 507,
"license_type": "no_license",
"max_line_length": 111,
"num_lines": 17,
"path": "/docs/developer_manual/kernel.md",
"repo_name": "orix-software/orix-software.github.io",
"src_encoding": "UTF-8",
"text": "# Kernel\r\n\r\nKernel is located in bank 7.\r\n\r\nEach primitives are called with \"brk\" instruction with a value after it.\r\n\r\nWe can pass args to AXY or others memory location and the primitive can be called.\r\n\r\n[Orixsdk](../home/orixsdk/) manages some kernel calls, and it's better to use macro set to call some primitives\r\n\r\n## Useful links\r\n\r\n* Documented primitives [Primitives](../../kernel/primitives/)\r\n\r\n* Orixsdk (sdk for assembly) : [Orixsdk](../orixsdk/)\r\n\r\n* C programming : cc65 (telestrat target)\r\n"
},
{
"alpha_fraction": 0.6855087280273438,
"alphanum_fraction": 0.7307296991348267,
"avg_line_length": 30.387096405029297,
"blob_id": "b86b178b4c93b0459bf9254ff17bff575e0b6caa",
"content_id": "2d18d5c0a6c40e361a38b6c9da266ec323a94427",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Markdown",
"length_bytes": 973,
"license_type": "no_license",
"max_line_length": 198,
"num_lines": 31,
"path": "/docs/binaries/generateBinary.md",
"repo_name": "orix-software/orix-software.github.io",
"src_encoding": "UTF-8",
"text": "# Build a binary from cc65 or assembly located on sdcard\n\ncc65 generates a non relocation format for telestrat target.\n\nWe decided to stop this format in the kernel v2022.4 because it was too complex to manage it in a kernel which use dynamic memory allocation.\n\nIn order to generate a launchable binary, relocbin must be used : [https://github.com/assinie/orix-sdk/blob/master/bin/relocbin.py3](https://github.com/assinie/orix-sdk/blob/master/bin/relocbin.py3)\n\n## Step one : build your binary from cc65 with the start address $80\n\n``` bash\ncl65 -ttelestrat mysrc.c -o mysrc_800 --start-addr \\$800\n```\n\n## Step Two : build your binary from cc65 with the start address $900\n\n``` bash\n/# cl65 -ttelestrat mysrc.c -o mysrc_900 --start-addr \\$900\n```\n\n## Step three (last one) : generate the relocation format:\n\n``` bash\n/#python relocbin.py3 -2 -o mybin mysrc_800 mysrc_900\n```\n\nPlace mybin in /bin of the sdcard/usb key of the twilighte board, and launch :\n\n```\n/#mybin\n```\n"
},
{
"alpha_fraction": 0.5608108043670654,
"alphanum_fraction": 0.7094594836235046,
"avg_line_length": 36,
"blob_id": "b8a990f84d98d9d40d1d0716b901f088c62c35f4",
"content_id": "578f8838c296194d3ffe226382e7e4791fba8f6b",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "JavaScript",
"length_bytes": 148,
"license_type": "no_license",
"max_line_length": 126,
"num_lines": 4,
"path": "/doxygen/doc/html/search/all_8.js",
"repo_name": "orix-software/orix-software.github.io",
"src_encoding": "UTF-8",
"text": "var searchData=\n[\n ['ypos_5fscreen_86',['ypos_screen',['../structvi__struct__data.html#ad6b155867a22adc3adc2a3dac0c03446',1,'vi_struct_data']]]\n];\n"
},
{
"alpha_fraction": 0.518302857875824,
"alphanum_fraction": 0.5440931916236877,
"avg_line_length": 16.171428680419922,
"blob_id": "57145d306f54af9ebcde776bd61177a4ef6d43ee",
"content_id": "29ebf6f827c4ff417e47a87c5c232b686cedffbe",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Markdown",
"length_bytes": 1202,
"license_type": "no_license",
"max_line_length": 104,
"num_lines": 70,
"path": "/doxygen/doxybook_output/Files/vi__check__eof_8s.md",
"repo_name": "orix-software/orix-software.github.io",
"src_encoding": "UTF-8",
"text": "---\ntitle: /mnt/c/Users/plifp/OneDrive/oric/projets/orix-software/vi/src/functions/subfunc/vi/vi_check_eof.s\n\n---\n\n# /mnt/c/Users/plifp/OneDrive/oric/projets/orix-software/vi/src/functions/subfunc/vi/vi_check_eof.s\n\n\n\n## Functions\n\n| | Name |\n| -------------- | -------------- |\n| | **[vi_check_eof](Files/vi__check__eof_8s.md#function-vi-check-eof)**() |\n\n\n## Functions Documentation\n\n### function vi_check_eof\n\n```cpp\nvi_check_eof()\n```\n\n\n\n\n## Source code\n\n```cpp\n.proc vi_check_eof\n ; return in A 00 if eof\n ; returns in A 01 if not eof\n\n ; Do we reached eof ?\n ldy #vi_struct_data::ptr_last_char_file\n lda (vi_struct),y\n sta vi_tmp1\n\n lda vi_ptr_file_used\n cmp vi_tmp1\n bne @not_eof\n\n ldy #vi_struct_data::ptr_last_char_file+1\n lda (vi_struct),y\n sta vi_tmp1\n\n lda vi_ptr_file_used+1\n cmp vi_tmp1\n bne @not_eof\n@is_eof:\n lda #IS_EOF ; EOF\n rts\n\n@is_eof_before:\n jsr vi_ptr_file_used_plus_plus\n jmp @is_eof\n\n@not_eof:\n\n@not_last_line:\n lda #$01 ; Not eof\n rts\n.endproc\n```\n\n\n-------------------------------\n\nUpdated on 2022-12-15 at 11:48:27 +0100\n"
},
{
"alpha_fraction": 0.31221720576286316,
"alphanum_fraction": 0.39819005131721497,
"avg_line_length": 8.608695983886719,
"blob_id": "3ee0b042051b0214d194d733c41a91622ba95a63",
"content_id": "fe282c6f0d8425f7a3f9fd58f0e7ebe4f17c6a86",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Markdown",
"length_bytes": 221,
"license_type": "no_license",
"max_line_length": 51,
"num_lines": 23,
"path": "/docs/tools_docs/vi/Files/dir_e1568de7a9ec0caf269f7729a27efb24.md",
"repo_name": "orix-software/orix-software.github.io",
"src_encoding": "UTF-8",
"text": "---\ntitle: /commands\n\n---\n\n# /commands\n\n\n\n## Files\n\n| Name |\n| -------------- |\n| **[/commands/vi.s](Files/vi_8s.md#file-vi.s)** |\n\n\n\n\n\n\n-------------------------------\n\nUpdated on 2022-12-15 at 14:20:17 +0100\n"
},
{
"alpha_fraction": 0.7213114500045776,
"alphanum_fraction": 0.7213114500045776,
"avg_line_length": 19.33333396911621,
"blob_id": "3213b44bd0ad316e03ef68b5ab106bc90b66a904",
"content_id": "88699fd41be200a04c385d4ae24371eb6318d970",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Markdown",
"length_bytes": 61,
"license_type": "no_license",
"max_line_length": 41,
"num_lines": 3,
"path": "/hardware/docs/twilighteboard/index.md",
"repo_name": "orix-software/orix-software.github.io",
"src_encoding": "UTF-8",
"text": "# Twilighte board\n\n* [FDC, RTC WIFI controller ](fdcWifiEsp)\n"
},
{
"alpha_fraction": 0.5763688683509827,
"alphanum_fraction": 0.5821325778961182,
"avg_line_length": 11.34615421295166,
"blob_id": "606ab8a163ff3dc336ffdd4c4f168a2be2a5d9cf",
"content_id": "df88eeb52cac7f2e3c33edf9c21b5cfe0a037dc1",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Markdown",
"length_bytes": 347,
"license_type": "no_license",
"max_line_length": 59,
"num_lines": 26,
"path": "/docs/developer_manual/orixsdk_macros/mkdir.md",
"repo_name": "orix-software/orix-software.github.io",
"src_encoding": "UTF-8",
"text": "# MKDIR macro\r\n\r\n## Decription\r\n\r\nPerforms a mkdir\r\n\r\n## Usage\r\n\r\nmkdir ptr\r\n\r\nnote:\r\n ptr may be: (ptr), address\r\n\r\n## Example\r\n\r\n```ca65\r\n .include \"telestrat.inc\"\r\n .include \"../orix-sdk/macros/SDK_dir.mac\"\r\n\r\n mkdir myfolder\r\n rts\r\nmyfolder:\r\n .asciiz \"here\"\r\n```\r\n\r\nCall [XMKDIR](../../../kernel/primitives/xmkdir/) function.\r\n"
},
{
"alpha_fraction": 0.5292479395866394,
"alphanum_fraction": 0.5682451128959656,
"avg_line_length": 15.949999809265137,
"blob_id": "fea935639310993dead2c74eb1a89df6c5d10785",
"content_id": "cfa2f83530a4add47b8712657c684752af65569d",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Markdown",
"length_bytes": 363,
"license_type": "no_license",
"max_line_length": 58,
"num_lines": 20,
"path": "/docs/developer_manual/orixsdk_macros/strncpy.md",
"repo_name": "orix-software/orix-software.github.io",
"src_encoding": "UTF-8",
"text": "# strncpy\r\n\r\nstrncpy src, dest, n\r\n\r\nA: 0 et Z=1 si copie effectuée, inchangé si non\r\nX: 0\r\nY: Longueur réellement copiée\r\n\r\n```ca65\r\n .include \"telestrat.inc\"\r\n\r\n lda #<file_path\r\n sta ptr2\r\n lda #>file_path\r\n sta ptr2+1\r\n\r\n ;Limit 20 bytes (immediate mode) strncpy(src, dest, n)\r\n strncpy ptr2, ptr1, #20\r\n rts\r\n```\r\n"
},
{
"alpha_fraction": 0.41333332657814026,
"alphanum_fraction": 0.6745454668998718,
"avg_line_length": 85.84210205078125,
"blob_id": "839bcaf6c7f0fad285b64ce5e3e777d9a4584f85",
"content_id": "ae1bd9e62dc82e73410fce260b5e00c3b47fa769",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "JavaScript",
"length_bytes": 1650,
"license_type": "no_license",
"max_line_length": 127,
"num_lines": 19,
"path": "/doxygen/doc/html/search/defines_0.js",
"repo_name": "orix-software/orix-software.github.io",
"src_encoding": "UTF-8",
"text": "var searchData=\n[\n ['vi_5farg1_154',['vi_arg1',['../vi_8s.html#a748d82d2b79b25f42baf1aa09b113fa8',1,'vi.s']]],\n ['vi_5fargc_155',['vi_argc',['../vi_8s.html#a2bc2546a95699949586837399ca8f149',1,'vi.s']]],\n ['vi_5fargv_156',['vi_argv',['../vi_8s.html#a73a7e451001ba88c5e9a8e0ad6692a79',1,'vi.s']]],\n ['vi_5ffp_157',['vi_fp',['../vi_8s.html#a8b9ef88939dadbcaa23a36220ebf5d6e',1,'vi.s']]],\n ['vi_5flast_5fline_5feditor_158',['VI_LAST_LINE_EDITOR',['../vi_8s.html#a05a24bfdd3281d1c38bdfd7f5ca60a6f',1,'vi.s']]],\n ['vi_5flength_5ffile_159',['vi_length_file',['../vi_8s.html#abae6739b843cfdb8755d23dd233d8722',1,'vi.s']]],\n ['vi_5flength_5ffile_5fcompute_160',['vi_length_file_compute',['../vi_8s.html#a2f070cdf633a809c34c89b4899976209',1,'vi.s']]],\n ['vi_5fptr1_161',['vi_ptr1',['../vi_8s.html#af920f04dd7b23ce604cf08c94baa406e',1,'vi.s']]],\n ['vi_5fptr2_162',['vi_ptr2',['../vi_8s.html#a1341cfa2ffa64e4b3ab733c22e86b246',1,'vi.s']]],\n ['vi_5fptr3_163',['vi_ptr3',['../vi_8s.html#ac399189ae7b0f0964d92151877097b38',1,'vi.s']]],\n ['vi_5fptr_5ffile_164',['vi_ptr_file',['../vi_8s.html#ad1b28128fceea3595d13c9db24fd9428',1,'vi.s']]],\n ['vi_5fptr_5ffile_5fused_165',['vi_ptr_file_used',['../vi_8s.html#ac164dad3f277278cb7533ebc52e09fcf',1,'vi.s']]],\n ['vi_5fptr_5fscreen_166',['vi_ptr_screen',['../vi_8s.html#a8e36ccca6aae46cd991e5cbcf9e4e805',1,'vi.s']]],\n ['vi_5fsavex_167',['vi_savex',['../vi_8s.html#ae57cc4c96cf4eaed090b78292b1c8f08',1,'vi.s']]],\n ['vi_5fstruct_168',['vi_struct',['../vi_8s.html#a9b6bac3e7816ef8173a6e88e28fe098b',1,'vi.s']]],\n ['vi_5ftmp3_169',['vi_tmp3',['../vi_8s.html#a855290507b1b6f5ec331f1821e740a4c',1,'vi.s']]]\n];\n"
},
{
"alpha_fraction": 0.7222659587860107,
"alphanum_fraction": 0.7269866466522217,
"avg_line_length": 24.93877601623535,
"blob_id": "bca8b9ec78fc4d28e8058e33b988b9d26a0924b4",
"content_id": "f338a2f1fe5d3df1b77fb39523d518892bac26d4",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Markdown",
"length_bytes": 1275,
"license_type": "no_license",
"max_line_length": 135,
"num_lines": 49,
"path": "/docs/commands/sh.md",
"repo_name": "orix-software/orix-software.github.io",
"src_encoding": "UTF-8",
"text": "# sh\n\n## Introduction\n\nWhen kernel has finished to initialize at boot time, sh command is started in interactive mode\n\n## Interactive mode\n\n*Esc-b* : move cursor at the beginning of the previous word\n\n*Esc-f* : move cursor at the end of the next word\n\n*Esc-l* : switch current word into lowercase, and put cursor at the end of the word\n\n*Esc-u* : switch current word into uppercase, and put cursor at the end of the word\n\n*Ctrl-a* : move cursor at the beginning of the line\n\n*Ctrl-e* : move cursor at the end of the line\n\n*Ctrl-c* : cancel current line\n\n*Ctrl-k* : delete the end of the line\n\n*Ctrl-l* : clear screen, and displays the line, the cursors keeps his position\n\n*Ctrl-u* : clear the line and put cursor at the beginning of the line\n\n*Ctrl-t* : swap char under the cursor with the previous one, and shift the cursor to the right\n\n*Ctrl-o* : Switch into replacement or insertion mode\n\n## Shortcuts for tools\n\n*Funct+A* : Start Systemd rom\n\n*Funct+B* : Start Basic11\n\n*Funct+G* : Start basic11 gui\n\n*Funct+L* : Start Loader\n\n*Funct+T* : Start Twilighte setup\n\n## Available commands\n\nThe command line is limited in characters (37). If you reach this limit, you won’t be able to type the complete command line Known bugs\n\n* « ./ » can not be used to launch a binar\n"
},
{
"alpha_fraction": 0.6501831412315369,
"alphanum_fraction": 0.66300368309021,
"avg_line_length": 22.81818199157715,
"blob_id": "a280f796c822e4175d4f544b8dbf4e17dc8e4be7",
"content_id": "2b7c2ced8ad4f3939305464657509473ae98a9e7",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Markdown",
"length_bytes": 546,
"license_type": "no_license",
"max_line_length": 72,
"num_lines": 22,
"path": "/docs/kernel/primitives/xvars.md",
"repo_name": "orix-software/orix-software.github.io",
"src_encoding": "UTF-8",
"text": "# XVARS\r\n\r\n## Get the number max of process\r\n\r\n```ca65\r\n ldx #KERNEL_XVARS_KERNEL_MAX_PROCESS ; Value = 8\r\n BRK_KERNEL XVARS\r\n ; A and Y contains the number max of the process available\r\n\r\n```\r\n\r\n## Get the list of the process\r\n\r\n```ca65\r\n .include \"telestrat.inc\"\r\n ldx #KERNEL_XVARS_PID_LIST_PTR ; Value = $0A\r\n BRK_KERNEL XVARS\r\n ; A and Y contains the ptr of the list of current pid.\r\n ; If 0 is set, the slot is available and there is no process\r\n ; The end of the list is defined with #KERNEL_XVARS_KERNEL_MAX_PROCESS\r\n rts\r\n```\r\n"
},
{
"alpha_fraction": 0.4895057678222656,
"alphanum_fraction": 0.5179417729377747,
"avg_line_length": 16.174419403076172,
"blob_id": "184635ef418514ffe82b805e33570f6b84c33489",
"content_id": "058c9d02da844733fa2fd26d6d30c85d5aa6ec61",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Markdown",
"length_bytes": 1477,
"license_type": "no_license",
"max_line_length": 104,
"num_lines": 86,
"path": "/doxygen/doxybook_output/Files/vi__key__right_8s.md",
"repo_name": "orix-software/orix-software.github.io",
"src_encoding": "UTF-8",
"text": "---\ntitle: /mnt/c/Users/plifp/OneDrive/oric/projets/orix-software/vi/src/functions/subfunc/vi/vi_key_right.s\n\n---\n\n# /mnt/c/Users/plifp/OneDrive/oric/projets/orix-software/vi/src/functions/subfunc/vi/vi_key_right.s\n\n\n\n## Functions\n\n| | Name |\n| -------------- | -------------- |\n| | **[vi_key_right](Files/vi__key__right_8s.md#function-vi-key-right)**() |\n\n\n## Functions Documentation\n\n### function vi_key_right\n\n```cpp\nvi_key_right()\n```\n\n\n\n\n## Source code\n\n```cpp\n.proc vi_key_right\n\n ; are we on the end of the text file ?\n\n ; Compute if we need to erase ~\n jsr vi_check_eof\n cmp #IS_EOF\n bne @not_eof\n rts\n\n@not_eof:\n jsr vi_editor_switch_off_cursor\n ldy #$00\n lda (vi_ptr_file_used),y\n cmp #CR\n beq @no_add_x\n cmp #LF\n beq @no_add_x\n\n jsr vi_ptr_file_used_plus_plus\n jsr vi_xpos_screen_plus_plus\n cmp #$01\n bne @no_add_x\n\n jsr vi_set_xpos_0\n jsr vi_ypos_screen_plus_plus\n cmp #$01\n bne @no_add_x\n\n scroll up, 0, 26 ; Yes scroll\n\n lda vi_ptr_file_used\n sta vi_ptr1\n\n lda vi_ptr_file_used+1\n sta vi_ptr1+1\n\n lda vi_ptr1\n bne @S1\n dec vi_ptr1+1\n@S1:\n dec vi_ptr1\n\n\n lda vi_ptr1\n ldy vi_ptr1+1\n jsr vi_fill_last_line\n@no_add_x:\n rts\n.endproc\n```\n\n\n-------------------------------\n\nUpdated on 2022-12-15 at 11:48:27 +0100\n"
},
{
"alpha_fraction": 0.6717948913574219,
"alphanum_fraction": 0.6717948913574219,
"avg_line_length": 9.263157844543457,
"blob_id": "5e387d626edfb6b8550d52daabd53f6d3583719c",
"content_id": "85f482462d18221a70aa8a113f3728c35c1a565f",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Markdown",
"length_bytes": 195,
"license_type": "no_license",
"max_line_length": 70,
"num_lines": 19,
"path": "/docs/commands/cd.md",
"repo_name": "orix-software/orix-software.github.io",
"src_encoding": "UTF-8",
"text": "# cd\n\n## Introduction\n\nChange directory\n\n## SYNOPSYS\n\n+ cd DIRECTORY\n\n## EXAMPLES\n\n+ cd /usr/bin\n+ cd ..\n+ cd /\n\n## SOURCE\n\nhttps://github.com/orix-software/shell/blob/master/src/commands/cd.asm\n"
},
{
"alpha_fraction": 0.5436241626739502,
"alphanum_fraction": 0.7114093899726868,
"avg_line_length": 36.25,
"blob_id": "3cdf695782ec8124af1dc0863734eff6ca0f44ea",
"content_id": "881bf3fac24a0fde8e853e41b918ce302eb19e37",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "JavaScript",
"length_bytes": 149,
"license_type": "no_license",
"max_line_length": 127,
"num_lines": 4,
"path": "/doxygen/doc/html/search/variables_0.js",
"repo_name": "orix-software/orix-software.github.io",
"src_encoding": "UTF-8",
"text": "var searchData=\n[\n ['length_5ffile_151',['length_file',['../structvi__struct__data.html#a3a41260445a6c5ed9b6d54ec356a7b7e',1,'vi_struct_data']]]\n];\n"
},
{
"alpha_fraction": 0.4906832277774811,
"alphanum_fraction": 0.5254658460617065,
"avg_line_length": 14.188679695129395,
"blob_id": "8c3bafdda6dcce985832734f51fc13ff3991e4e2",
"content_id": "3649ba8eb0c974d3fad4bcf072d2765bbc31593f",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Markdown",
"length_bytes": 805,
"license_type": "no_license",
"max_line_length": 112,
"num_lines": 53,
"path": "/doxygen/doxybook_output_vi/Files/vi__ptr__file__used__sub__sub_8s.md",
"repo_name": "orix-software/orix-software.github.io",
"src_encoding": "UTF-8",
"text": "---\ntitle: vi_ptr_file_used_sub_sub.s\n\n---\n\n# vi_ptr_file_used_sub_sub.s\n\n\n\n## Routine\n\n| | Name |\n| -------------- | -------------- |\n| | **[vi_ptr_file_used_sub_sub](Files/vi__ptr__file__used__sub__sub_8s.md#Routine-vi-ptr-file-used-sub-sub)** |\n\n\n## Routine documentation\n\n### Routine vi_ptr_file_used_sub_sub\n\n```ca65\nvi_ptr_file_used_sub_sub\n```\n\n\n\n\n## Source code\n\n```ca65\n.proc vi_ptr_file_used_sub_sub\n\n jsr vi_check_beginning_of_file\n cmp #IS_BEGINNING_OF_THE_FILE\n beq @beginning_of_file\n\n lda vi_ptr_file_used ; 98\n bne @out\n dec vi_ptr_file_used+1\n@out:\n dec vi_ptr_file_used\n lda #$01\n rts\n\n@beginning_of_file:\n rts\n.endproc\n```\n\n\n-------------------------------\n\nUpdated on 2022-12-15 at 14:20:17 +0100\n"
},
{
"alpha_fraction": 0.5772727131843567,
"alphanum_fraction": 0.5909090638160706,
"avg_line_length": 9.421052932739258,
"blob_id": "bfee63d058ff33ac1fb83912f9a1ec79586cf63b",
"content_id": "423e363e916a7756e959afc700270ec0c4b01927",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Markdown",
"length_bytes": 220,
"license_type": "no_license",
"max_line_length": 39,
"num_lines": 19,
"path": "/kernel/docs/primitives/xgetcwd.md",
"repo_name": "orix-software/orix-software.github.io",
"src_encoding": "UTF-8",
"text": "# XGETCWD (getcwd)\r\n\r\n## Description\r\n\r\nReturn current directory of the process\r\n\r\n## Input\r\n\r\n## Output\r\n\r\nA/Y returns a string\r\n\r\n## Example\r\n\r\n``` ca65\r\n BRK_KERNEL XGETCWD\r\n BRK_KERNEL XWSTR0\r\n rts\r\n``` \r\n"
},
{
"alpha_fraction": 0.5714285969734192,
"alphanum_fraction": 0.5766233801841736,
"avg_line_length": 11.275861740112305,
"blob_id": "d834a6539b7f376417f215da97f77212fba7cee1",
"content_id": "db70fa55714447bb37743d1bee2270ef1af623b0",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Markdown",
"length_bytes": 385,
"license_type": "no_license",
"max_line_length": 101,
"num_lines": 29,
"path": "/docs/kernel/primitives/xclose.md",
"repo_name": "orix-software/orix-software.github.io",
"src_encoding": "UTF-8",
"text": "# XCLOSE\r\n\r\n## Description\r\n\r\nClose file\r\n\r\n## Input\r\n\r\nA : fd\r\n\r\n## Output\r\n\r\n## Modify\r\n\r\nRESB\r\n\r\n## Samples\r\n\r\n### Example\r\n\r\n```ca65\r\n .include \"telestrat.inc\"\r\n\r\n lda MYFD ; MYFD contains the value of the fd (returns of fopen)\r\n BRK_KERNEL XCLOSE\r\n rts\r\n```\r\n\r\n!!! tip \"See [fclose](../../../developer_manual/orixsdk_macros/fclose) macro from orix-sdk to use it\"\r\n"
},
{
"alpha_fraction": 0.4593023359775543,
"alphanum_fraction": 0.487209290266037,
"avg_line_length": 15.86274528503418,
"blob_id": "0004fff78046198828f112a1baa4c92a159b0bbe",
"content_id": "2f74cc9185381d69b84314c6dc8d8f100617c856",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Markdown",
"length_bytes": 860,
"license_type": "no_license",
"max_line_length": 128,
"num_lines": 51,
"path": "/doxygen/doxybook_output_vi/Files/__clrscr__vi_8s.md",
"repo_name": "orix-software/orix-software.github.io",
"src_encoding": "UTF-8",
"text": "---\ntitle: /Routines/_clrscr_vi.s\n\n---\n\n# /Routines/_clrscr_vi.s\n\n\n\n## Routine\n\n| | Name |\n| -------------- | -------------- |\n| | **[_clrscr_vi](Files/__clrscr__vi_8s.md#Routine--clrscr-vi)** |\n\n\n## Routine documentation\n\n### Routine _clrscr_vi\n\n```ca65\n_clrscr_vi\n```\n\n\n\n\n## Source code\n\n```ca65\n.proc _clrscr_vi\n ; Switch to text mode\n BRK_TELEMON(XTEXT)\n\n lda #<SCREEN ; Get position screen\n ldy #>SCREEN\n sta RES\n sty RES+1\n\n ldy #<(SCREEN+SCREEN_XSIZE*SCREEN_YSIZE)\n ldx #>(SCREEN+SCREEN_XSIZE*SCREEN_YSIZE)\n lda #' '\n BRK_TELEMON XFILLM ; Calls XFILLM : it fills A value from RES address and size of X and Y value\n rts\n.endproc\n```\n\n\n-------------------------------\n\nUpdated on 2022-12-15 at 14:20:17 +0100\n"
},
{
"alpha_fraction": 0.6974169611930847,
"alphanum_fraction": 0.7195571660995483,
"avg_line_length": 14,
"blob_id": "9b54fca277df2c927b32405295709cffcb20e852",
"content_id": "4eadcdc3024f049a8259f5c89097578a53bba9a4",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Markdown",
"length_bytes": 271,
"license_type": "no_license",
"max_line_length": 58,
"num_lines": 18,
"path": "/docs/commands/raw2dsk.md",
"repo_name": "orix-software/orix-software.github.io",
"src_encoding": "UTF-8",
"text": "# raw2dsk\n\n### raw2dsk utility\n\n## SYNOPSYS\n+ raw2dsk rawfile dskfile\n\n## EXAMPLES\n+ raw2dsk dumpdsk.raw dumpdsk.dsk\n\n## DESCRIPTION\n**raw2dsk** convert raw floppy disk dump file to .dsk file\n\n## OPTIONS\n* no options\n\n## SOURCE\nhttps://github.com/orix-software/raw2dsk\n\n"
},
{
"alpha_fraction": 0.517241358757019,
"alphanum_fraction": 0.543448269367218,
"avg_line_length": 17.78238296508789,
"blob_id": "315d43f877316cd44e9a86e823e491477a0d7514",
"content_id": "1263fe77200881d9ae76bc970e1a9107cb227196",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Markdown",
"length_bytes": 3635,
"license_type": "no_license",
"max_line_length": 103,
"num_lines": 193,
"path": "/docs/tools_docs/vi/Files/vi__key__up_8s.md",
"repo_name": "orix-software/orix-software.github.io",
"src_encoding": "UTF-8",
"text": "---\ntitle: vi_key_up.s\n\n---\n\n# vi_key_up.s\n\n\n\n## Routine\n\n| | Name |\n| -------------- | -------------- |\n| | **[vi_key_up](Files/vi__key__up_8s.md#Routine-vi-key-up)** |\n\n\n## Routine documentation\n\n### Routine vi_key_up\n\n```ca65\nvi_key_up\n```\n\n\n\n\n## Source code\n\n```ca65\n.proc vi_key_up\n ; Debut du fichier ? On sort\n jsr vi_check_beginning_of_file\n cmp #IS_BEGINNING_OF_THE_FILE\n beq @stop_shorter\n\n ; On eteint le curseur\n jsr vi_editor_switch_off_cursor\n\n ; On force à revenir à la position X=0 de la ligne précécente : FIXME\n jsr vi_set_xpos_0\n\n@continue_up:\n ; Checking if previous char is not $0A or $0D if it's not the case, it's on a truncated line\n\n ; On va voir si la prédente ligne c'est une fin de ligne ou pas\n lda vi_ptr_file_used\n sta vi_ptr1\n\n lda vi_ptr_file_used+1\n sta vi_ptr1+1\n\n lda vi_ptr1 ; 98 ; On décrémente la copie de ptr_file_used pour lire le caractère précédent\n bne @out_compare\n dec vi_ptr1+1\n@out_compare:\n dec vi_ptr1\n\n ldy #$00\n lda (vi_ptr1),y\n cmp #LF\n beq @search_previous_line\n\n ;ldy #$00\n lda (vi_ptr1),y\n cmp #CR\n beq @search_previous_line\n\n lda vi_ptr1 ; 98\n bne @out_compare2\n dec vi_ptr1+1\n@out_compare2:\n dec vi_ptr1\n\n ldy #$00\n lda (vi_ptr1),y\n cmp #CR\n beq @search_previous_line\n\n ; Check if the\n\n jsr vi_ypos_screen_sub_sub\n\n\n jsr vi_search_previous_line_beginning ; Looking for previous line ($0D detection)\n\n\n jsr vi_ptr_file_used_sub_sub ; Skip $0D\n\n cpx #VI_EDITOR_MAX_COLUMN+1\n bcc @skip_y_plusplus\n\n jsr vi_ypos_screen_sub_sub\n\n@skip_y_plusplus:\n\n jsr vi_search_previous_line_beginning ; Looking for previous line ($0D detection)\n\n jsr vi_ptr_file_used_plus_plus\n\n ldy #$00\n lda (vi_ptr_file_used),y\n cmp #LF\n bne @S10\n jsr vi_ptr_file_used_plus_plus\n@S10:\n\n\n@stop_shorter:\n rts\n\n\n@search_previous_line:\n jsr vi_search_previous_line_beginning ; Looking for previous line ($0D detection)\n cmp #$00\n beq @exit\n\n jsr vi_search_previous_line_beginning\n cmp #$00\n beq @exit\n\n cpx #VI_EDITOR_MAX_COLUMN\n bcc @no_line_greater_than_39_chars\n\n\n jsr vi_ptr_file_used_plus_plus\n\n ldx #$00\n@looking_for_truncated_line:\n ldy #$00\n lda (vi_ptr_file_used),y\n cmp #CR\n beq @finished\n cmp #LF\n beq @skip_inx\n inx\n@skip_inx:\n jsr vi_ptr_file_used_plus_plus\n\n cpx #VI_EDITOR_MAX_COLUMN+1\n bne @looking_for_truncated_line\n ldx #$00\n beq @looking_for_truncated_line\n\n@finished:\n jsr vi_ypos_screen_sub_sub\n\n txa\n jsr vi_set_xpos_from_A\n rts\n\n@no_line_greater_than_39_chars:\n jsr vi_ptr_file_used_plus_plus\n\n ldy #$00\n jsr vi_check_0A\n\n@exit:\n ; Check if posx=0 and not the beginning of the file\n ldy #vi_struct_data::ypos_screen\n lda (vi_struct),y\n bne @stop\n\n scroll down, 0, 26\n\n ldx #$01\n stx vi_savex\n\n ldx vi_savex\n cpx #$00\n beq @stop\n\n ldy #$00\n@L1:\n lda (vi_ptr_file_used),y\n cmp #CR\n beq @stop\n sta VI_EDITION_VIDEO_ADRESS,y\n iny\n cpy #VI_EDITOR_MAX_COLUMN\n beq @stop\n bne @L1\n\n@stop:\n jsr vi_ypos_screen_sub_sub\n rts\n.endproc\n```\n\n\n-------------------------------\n\nUpdated on 2022-12-15 at 14:20:17 +0100\n"
},
{
"alpha_fraction": 0.6147540807723999,
"alphanum_fraction": 0.6623479723930359,
"avg_line_length": 27.43609046936035,
"blob_id": "39a56f0aaf130c7453b36f5d13ced8e1735c23a8",
"content_id": "1b198916779bf1a7184e0d9f18fd30b98bfa4c0b",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Shell",
"length_bytes": 3782,
"license_type": "no_license",
"max_line_length": 115,
"num_lines": 133,
"path": "/pandoc/build.sh",
"repo_name": "orix-software/orix-software.github.io",
"src_encoding": "UTF-8",
"text": "cat parts/part1.md > manual.md\n\necho \"\\\\\\newpage\\n\">> manual.md\ncat ../docs/commands/asm2k2.md >> manual.md\n\necho \"\\\\\\newpage\\n\">> manual.md\ncat ../docs/commands/bank.md >> manual.md\n\necho \"\\\\\\newpage\\n\">> manual.md\ncat ../docs/commands/barboric.md >> manual.md\n\necho \"\\\\\\newpage\\n\">> manual.md\ncat ../docs/commands/blakes7.md >> manual.md\n\necho \"\\\\\\newpage\\n\">> manual.md\ncat ../docs/commands/bootfd.md >> manual.md\n\necho \"\\\\\\newpage\\n\">> manual.md\ncat ../docs/commands/born1983.md >> manual.md\n\necho \"\\\\\\newpage\\n\">> manual.md\ncat ../docs/commands/cat.md >> manual.md\n\necho \"\\\\\\newpage\\n\">> manual.md\ncat ../docs/commands/cd.md >> manual.md\n\necho \"\\\\\\newpage\\n\">> manual.md\ncat ../docs/commands/cksum.md >> manual.md\necho '{width=\"7.685cm\" height=\"5.595cm\"}' >> manual.md\n\necho \"\\\\\\newpage\\n\">> manual.md\ncat ../docs/commands/clear.md >> manual.md\n\necho \"\\\\\\newpage\\n\">> manual.md\ncat ../docs/commands/cp.md >> manual.md\n\necho \"\\\\\\newpage\\n\">> manual.md\ncat ../docs/commands/df.md >> manual.md\necho \"\" >> manual.md\necho '{width=\"17cm\" height=\"12.293cm\"}' >> manual.md\n\necho \"\\\\\\newpage\\n\">> manual.md\ncat ../docs/commands/dsk-util.md >> manual.md\n\necho \"\\\\\\newpage\\n\">> manual.md\ncat ../docs/commands/echo.md >> manual.md\n\necho \"\\\\\\newpage\\n\">> manual.md\ncat ../docs/commands/env.md >> manual.md\n\necho \"\\\\\\newpage\\n\">> manual.md\ncat ../docs/commands/forth.md >> manual.md\n\necho \"\\\\\\newpage\\n\">> manual.md\ncat ../docs/commands/ftdos.md >> manual.md\n\necho \"\\\\\\newpage\\n\">> manual.md\ncat ../docs/commands/grep.md >> manual.md\necho \"\" >> manual.md\necho '{width=\"7.685cm\" height=\"5.595cm\"}' >> manual.md\n\necho \"\\\\\\newpage\\n\">> manual.md\ncat ../docs/commands/help.md >> manual.md\n\necho \"\\\\\\newpage\\n\">> manual.md\ncat ../docs/commands/hexdump.md >> manual.md\necho \"\" >> manual.md\necho '{width=\"7.685cm\" height=\"5.595cm\"}' >> manual.md\n\necho \"\\\\\\newpage\\n\">> manual.md\ncat ../docs/commands/ioports.md >> manual.md\n\necho \"\\\\\\newpage\\n\">> manual.md\ncat ../docs/commands/ls.md >> manual.md\n\necho \"\\\\\\newpage\\n\">> manual.md\ncat ../docs/commands/lscpu.md >> manual.md\n\necho \"\\\\\\newpage\\n\">> manual.md\ncat ../docs/commands/lsmem.md >> manual.md\n\n\ncho \"\\\\\\newpage\\n\">> manual.md\ncat ../docs/commands/man.md >> manual.md\n\necho \"\\\\\\newpage\\n\">> manual.md\ncat ../docs/commands/mkdir.md >> manual.md\n\necho \"\\\\\\newpage\\n\">> manual.md\ncat ../docs/commands/mount.md >> manual.md\necho \"\" >> manual.md\necho '{width=\"7.685cm\" height=\"5.595cm\"}' >> manual.md\n\necho \"\\\\\\newpage\\n\">> manual.md\ncat ../docs/commands/more.md >> manual.md\n\necho \"\\\\\\newpage\\n\">> manual.md\ncat ../docs/commands/otimer.md >> manual.md\n\necho \"\\\\\\newpage\\n\">> manual.md\ncat ../docs/commands/pwd.md >> manual.md\n\necho \"\\\\\\newpage\\n\">> manual.md\ncat ../docs/commands/ps.md >> manual.md\n\necho \"\\\\\\newpage\\n\">> manual.md\ncat ../docs/commands/quintes.md >> manual.md\n\necho \"\\\\\\newpage\\n\">> manual.md\ncat ../docs/commands/reboot.md >> manual.md\n\necho \"\\\\\\newpage\\n\">> manual.md\ncat ../docs/commands/rm.md >> manual.md\n\necho \"\\\\\\newpage\\n\">> manual.md\ncat ../docs/commands/sh.md >> manual.md\n\necho \"\\\\\\newpage\\n\">> manual.md\ncat ../docs/commands/twil.md >> manual.md\n\ncat parts/pizero.md >> manual.md\n\necho \"\\\\\\newpage\\n\">> manual.md\ncat ../docs/commands/submit.md >> manual.md\n#echo \"\\n======================\\n\">> manual.md\n#cat ../docs/commands/subdoc.md >> manual.md\n\necho \"\\\\\\newpage\\n\">> manual.md\ncat ../docs/commands/untar.md >> manual.md\n\ncat parts/last.md >> manual.md\n\npandoc -V geometry:margin=.5in -s -o ../twilighte_manual.pdf manual.md\n"
},
{
"alpha_fraction": 0.720335066318512,
"alphanum_fraction": 0.7333643436431885,
"avg_line_length": 33.78333282470703,
"blob_id": "0d788aecc6189c34bd56c7bb61def943cec43628",
"content_id": "bf2d66b0bf5c4cab65aee10e45e0dfc59e2398b2",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Markdown",
"length_bytes": 2149,
"license_type": "no_license",
"max_line_length": 202,
"num_lines": 60,
"path": "/docs/hardware/microdisc_controler.md",
"repo_name": "orix-software/orix-software.github.io",
"src_encoding": "UTF-8",
"text": "# How to connect a microdisc controler with the twilighte board\r\n\r\n* Solder A14 et A15 on the microdisc controler to ground\r\n* remove eeprom\r\n* connect silicebit amplibus on twilighte board expansion port\r\n\r\n\r\n\r\n## Issues\r\n\r\n* Reset from expansion board does not work, the one on the microdisc controler is working\r\n* *The usb controler in the twilighte board extension refuses to work properly during 20 minutes*, you need to wait 20 minutes before trying to use commands on twilighte configuration.\r\n\r\n## Steps\r\n\r\n### Step 1 : Check Drive\r\n\r\nCheck if the switches are ok on the drive :\r\n\r\n\r\n\r\n### Step 3 : solder A14 and A15 to ground on microdisc to oric connector\r\n\r\n\r\n\r\n### Step 4 : Remove Eprom\r\n\r\nSee the blue empty connector : the eprom is removed\r\n\r\n\r\n\r\n### Step 5 : Connect and switch on\r\n\r\n* Connect microdisc controler to the oric with the amplibus like this (Insert Silicebit amplibus into *expansion board connector* and the ribbon to microdisc controler in silicebit amplibus connector)\r\n\r\n\r\n\r\n* Connect the drive to microdisc controler\r\n* Switch on the oric with its own psu\r\n* Switch on the microdisc controleur with its own psu too\r\n\r\n!!! warning \"The internal usb controler in Twilighte board refuses to work correctly during 20 minutes, in this configuration. Wait a bit. It's a problem with the ribbon cable connected in the amplibus\"\r\n\r\n### Step 6 : Uses bootfd to start bootsector\r\n\r\n/#bootfd\r\n\r\n### Step 7 (Optionnal) : Use a pico psu for the microdisc controler\r\n\r\nIf you have a micro psu, you can plug it for your usb controler. You can also buy an connector to plug the micro psu if you want to insert it in a case.\r\n\r\nThe following photo shows the pico psu and the adaptor for pico psu.\r\n\r\n\r\n\r\n* Solder \"power on\" on the adaptator, in order to have the pico starting\r\n\r\n\r\n\r\n* plug a 12V PSU (1,5A used during test), in the jack connector of the pico psu\r\n\r\n"
},
{
"alpha_fraction": 0.5692307949066162,
"alphanum_fraction": 0.5846154093742371,
"avg_line_length": 13.29411792755127,
"blob_id": "04ada67ba1c4346da8d52edbb692e496ef36f550",
"content_id": "e22a4cc5a90ae1bbac54200d6e956f86f859cb10",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Markdown",
"length_bytes": 260,
"license_type": "no_license",
"max_line_length": 107,
"num_lines": 17,
"path": "/docs/kernel/primitives/xhires.md",
"repo_name": "orix-software/orix-software.github.io",
"src_encoding": "UTF-8",
"text": "# XHIRES\r\n\r\n## Usage\r\n\r\nSwitch to Hires\r\n\r\n## Example\r\n\r\n```ca65\r\n .include \"telestrat.inc\"\r\n\r\n ldx #$00\r\n BRK_KERNEL XHIRES\r\n rts\r\n```\r\n\r\n!!! tip \"See [setscreen](../../../developer_manual/orixsdk_macros/setscreen) macro from orix-sdk to use it\"\r\n"
},
{
"alpha_fraction": 0.4322916567325592,
"alphanum_fraction": 0.5260416865348816,
"avg_line_length": 9.666666984558105,
"blob_id": "d74152a18e365f3252cbf7509a1b2e8502e83a53",
"content_id": "3da4ed916b7376563a2ecd82065ed83e392c5fed",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Markdown",
"length_bytes": 192,
"license_type": "no_license",
"max_line_length": 39,
"num_lines": 18,
"path": "/docs/tools_docs/vi/Examples/This-example.md",
"repo_name": "orix-software/orix-software.github.io",
"src_encoding": "UTF-8",
"text": "---\ntitle: This\nsummary: strlen of a line \n\n---\n\n# This\n\n\n\nstrlen of a line\nroutine compute ... ```\n\n_Filename: This_\n\n-------------------------------\n\nUpdated on 2022-12-15 at 14:20:17 +0100\n"
},
{
"alpha_fraction": 0.6729559898376465,
"alphanum_fraction": 0.6729559898376465,
"avg_line_length": 12.782608985900879,
"blob_id": "29e0a394c7ed7b7e1220494634ff11ec792ca551",
"content_id": "c7b59b68173b2c204593f5d297c88cb0304113b7",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Markdown",
"length_bytes": 318,
"license_type": "no_license",
"max_line_length": 54,
"num_lines": 23,
"path": "/docs/commands/more.md",
"repo_name": "orix-software/orix-software.github.io",
"src_encoding": "UTF-8",
"text": "# Command: more\n\n### more utility\n\n## SYNOPSYS\n+ more <file>\n\n## EXAMPLES\n+ more /etc/autoboot.sub\n\n## DESCRIPTION\n**more** display a text file one screenfull at a time.\n\n*[RETURN]* display next line.\n\n*[CTRL]+C* break.\n\n*q * quit.\n\nAny other keys: display next page.\n\n## SOURCE\nhttps://github.com/orix-software/more\n\n"
},
{
"alpha_fraction": 0.5669782161712646,
"alphanum_fraction": 0.6064382195472717,
"avg_line_length": 15.603447914123535,
"blob_id": "5e6b42a326d7add97318a64ccc271b281d4d16fd",
"content_id": "d4402873bd544c613b24a01da5eab4a52f6e37a8",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Markdown",
"length_bytes": 963,
"license_type": "no_license",
"max_line_length": 94,
"num_lines": 58,
"path": "/doxygen/doxybook_output/Files/strings_8s.md",
"repo_name": "orix-software/orix-software.github.io",
"src_encoding": "UTF-8",
"text": "---\ntitle: /mnt/c/Users/plifp/OneDrive/oric/projets/orix-software/vi/src/data/vi/strings.s\n\n---\n\n# /mnt/c/Users/plifp/OneDrive/oric/projets/orix-software/vi/src/data/vi/strings.s\n\n\n\n## Attributes\n\n| | Name |\n| -------------- | -------------- |\n| char[2] | **[msg_nofilename](Files/strings_8s.md#variable-msg-nofilename)** |\n| char[2] | **[msg_impossibletowrite](Files/strings_8s.md#variable-msg-impossibletowrite)** |\n\n\n\n## Attributes Documentation\n\n### variable msg_nofilename\n\n```cpp\nchar[2] msg_nofilename;\n```\n\n\n### variable msg_impossibletowrite\n\n```cpp\nchar[2] msg_impossibletowrite;\n```\n\n\n\n## Source code\n\n```cpp\nmsg_insert:\n .asciiz \"-- INSERT --\"\n\nmsg_nofile:\n .asciiz \"[New File]\"\n\nmsg_nofilename:\n .byte 17,\"E32: No file name\",16,0\n\nmsg_impossibletowrite:\n .byte 17,\"E99: Impossible to write\",16,0\n\nmsg_written:\n .asciiz \"written\"\n```\n\n\n-------------------------------\n\nUpdated on 2022-12-15 at 11:48:27 +0100\n"
},
{
"alpha_fraction": 0.5028735399246216,
"alphanum_fraction": 0.5344827771186829,
"avg_line_length": 14.818181991577148,
"blob_id": "c9d7c8c23123cca08b5f49a97bd979a0c86f78f3",
"content_id": "f9cec676c74dcfa1ba892fe9b8f8ae6a48e95703",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Markdown",
"length_bytes": 2088,
"license_type": "no_license",
"max_line_length": 104,
"num_lines": 132,
"path": "/doxygen/doxybook_output_vi/Files/vi__strlen__current__line_8s.md",
"repo_name": "orix-software/orix-software.github.io",
"src_encoding": "UTF-8",
"text": "---\ntitle: vi_strlen_current_line.s\n\n---\n\n# vi_strlen_current_line.s\n\n\n\n## Routine\n\n| | Name |\n| -------------- | -------------- |\n| | **[vi_strlen_current_line](Files/vi__strlen__current__line_8s.md#Routine-vi-strlen-current-line)** |\n\n\n## Routine documentation\n\n### Routine vi_strlen_current_line\n\n```ca65\nvi_strlen_current_line\n```\n\n\n\n\n## Source code\n\n```ca65\n;; strlen of a line\n;\n;@param A&Y : ptr of the position\n;@return X : the length of the line\n;@return A & Y : the ptr reached\n;@see vi_struct\n;\n;@example This routine compute ...\n;@page strlen\n;;\n\n.proc vi_strlen_current_line\n\n ; Returns in A and Y the ptr of the beginning of the line\n ; Returns in X the length of the line\n\n sta vi_ptr2\n sty vi_ptr2+1\n\n ; Looking for the beginning of the line\n\n ldy #$00\n lda (vi_ptr2),y\n cmp #CR\n bne @search_beginning_of_the_line\n\n lda vi_ptr2 ; 98\n bne @out_compare2\n dec vi_ptr2+1\n@out_compare2:\n dec vi_ptr2\n\n\n@search_beginning_of_the_line:\n ; Check beginning\n\n lda vi_ptr2\n cmp vi_ptr_file\n bne @not_beginning\n\n lda vi_ptr2+1\n cmp vi_ptr_file+1\n bne @not_beginning\n\n lda vi_ptr2\n ldy vi_ptr2+1\n ldx #$00\n\n rts\n\n@not_beginning:\n ldy #$00\n lda (vi_ptr2),y\n cmp #LF\n beq @found_beginning\n cmp #CR\n beq @found_beginning\n\n lda vi_ptr2 ; 98\n bne @out_compare\n dec vi_ptr2+1\n@out_compare:\n dec vi_ptr2\n jmp @search_beginning_of_the_line\n\n@found_beginning:\n inc vi_ptr2\n bne @no_inc\n inc vi_ptr2+1\n@no_inc:\n ; At this step we are at the beginning of the line\n ; search now the end of the line\n\n\n ldx #$00\n\n ldy #$00\n@search_eol:\n\n lda (vi_ptr2),y\n cmp #CR\n beq @found_eol\n\n\n iny\n\n bne @search_eol\n\n@found_eol:\n tya\n tax\n lda vi_ptr2\n ldy vi_ptr2+1\n\n rts\n.endproc\n```\n\n\n-------------------------------\n\nUpdated on 2022-12-15 at 14:20:17 +0100\n"
},
{
"alpha_fraction": 0.5564587712287903,
"alphanum_fraction": 0.5977867245674133,
"avg_line_length": 33.45036315917969,
"blob_id": "a21bec339adfa37928056e3e10d63a281eb0ffd3",
"content_id": "d1ab3540b8b049873f4c32e81f73b9053ad9d931",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Markdown",
"length_bytes": 14734,
"license_type": "no_license",
"max_line_length": 168,
"num_lines": 413,
"path": "/pandoc/parts/last.md",
"repo_name": "orix-software/orix-software.github.io",
"src_encoding": "UTF-8",
"text": "\\newpage\r\n\r\n[]{#anchor-92}CUMULUS COMPATIBILITY\r\n===================================\r\n\r\n[]{#anchor-93}How to connect a cumulus\r\n---------------------------------------\r\n\r\nOn the current firmware (Firmware 1) : and current hardware (board\r\nversion v0.65), we have to do some hacks to have cumulus working. But,\r\nyou will only launch two diskfile. Anyway, you can access to drive with\r\nno restriction, except bank switching. See « Hardware and firmware\r\nupgrade », if you want to avoid theses modifications\r\n\r\nIn firmware 1, and with board modification, there is only two working\r\ndisk : Blake's 7 and VIP2015.\r\n\r\nPlease, test your cumulus on the oric connected to the board. If it does\r\nnot work on your Oric, it won't work too with card plugged !\r\n\r\nIf you want to use cumulus, you have to :\r\n\r\n[]{#anchor-94}1) cut 4 pins on daughter card (ROMDIS, MAP, A14, A15)\r\n\r\n{width=\"11.137cm\"\r\nheight=\"14.185cm\"}\r\n\r\n[]{#anchor-95}2) remove eprom from cumulus\r\n\r\n{width=\"15.722cm\"\r\nheight=\"20.657cm\"}\r\n\r\n[]{#anchor-96}3) add another amplibus before twilighte daughter board\r\n\r\n{width=\"17cm\"\r\nheight=\"15.452cm\"}\r\n\r\n[]{#anchor-97}4) Connect all cards to the oric\r\n\r\n{width=\"16.346cm\" height=\"18.554cm\"}\r\n-------------------------------------------------------------------------------------------------\r\n\r\n[]{#anchor-98}Twilighte board firmware compatibility\r\n----------------------------------------------------\r\n\r\nOnly firmware 2 is available to use boot sector to start Microdisc disk.\r\n\r\n[]{#anchor-99}Hardware and firmware upgrade\r\n===========================================\r\n\r\n[]{#anchor-100}Firmware upgrade\r\n-------------------------------\r\n\r\nThere is only one firmware available. The version 2 is in development.\r\n\r\n[]{#anchor-101}First method : For those who have programmers and some hardware tool\r\n-----------------------------------------------------------------------------------\r\n\r\nBut, when it will be released, you could update the firmware with :\r\n\r\n1\\) a plcc extractor\r\n\r\n2\\) altera software (Quartys v13)\r\n\r\n3\\) a Jtag programmer\r\n\r\n4\\) solder the jtag connector\r\n\r\n5\\) get .pof file\r\n\r\n[]{#anchor-102}Second method : send the card to the author of the card (me)\r\n---------------------------------------------------------------------------\r\n\r\nIn that case, fimware upgrade will be done, and you could ask to upgrade\r\nto new board version to add (sometimes new functionnality)\r\n\r\n\\newpage\r\n\r\n[]{#anchor-103}TROUBLE SHOOTING\r\n===============================\r\n\r\n[]{#anchor-104}'ls' displays garbage on screen\r\n----------------------------------------------\r\n\r\nInsert your sdcard or your usb drive into your PC. You should have\r\nstrange « file » on the sdcard : remove theses files.\r\n\r\n[]{#anchor-105}Impossible to mount a usb key or a sdcard\r\n--------------------------------------------------------\r\n\r\nThe sdcard must be in FAT32 format\r\n\r\n[]{#anchor-106}Screen garbage when i use bank\r\n---------------------------------------------\r\n\r\nIf you have screen garbage when you switched to ram bank before with\r\n« twil -w »\r\n\r\nIt means that ram bank are not initialized. See orixcfg section to fix\r\nit\r\n\r\n[]{#anchor-107}Pi zero always reboots\r\n-------------------------------------\r\n\r\nCheck your PSU. If you have a 2A PSU and you have a pi zero, cumulus and\r\nTOM2 connected, you should reach the PSU limits. If you can't get\r\nanother PSU, you can disable bluetooth of you pi zero, or you can also\r\ndownclock from 1Ghz to 700mhz for example.\r\n\r\nYou can also use a 3A PSU. In the future, it will be possible to add\r\nanother PSU on the board.\r\n\r\n[]{#anchor-108}When i start Orix, filesytem is unstable or displays usb controler not found\r\n-------------------------------------------------------------------------------------------\r\n\r\nIf you have pi zero connected, it could answer to the controler partial\r\ninformation or could hang the usb controler because controler does not\r\nunderstand usb data when it sends information to usb port.\r\n\r\nYou have to wait a bit. If you want to verify this, you can switch off\r\nthe oric (and then the pi zero), switch on the oric with Orix, and type\r\n'debug', if you have another value than \\#AA for ch376 check exists,\r\nit's the problem, if you do 'debug' another value will be displayed but\r\nnot \\#AA. In fact, when pi zero boot, usb controler is unstable.\r\n\r\n[]{#anchor-109}« I/O Error » is displayed\r\n-----------------------------------------\r\n\r\nYou can reach this message in some case :\r\n\r\n1. device (sdcard or usbdrive is missing)\r\n2. after a launch of « df » command : There is an issue, the controler\r\n is in incorrect state after this command. It's a bug\r\n\r\nYou can usually fix it by launching « ls » twice. Because « ls » handles\r\na reset order to the usb controler when it does not produce the correct\r\nanswer. It means that if USB controler is not working well, ls displays\r\nthe error message and will produce a reset command to the controler. If\r\nyou launch ls again, it will work.\r\n\r\n[]{#anchor-110}The oric does not work : black screen\r\n----------------------------------------------------\r\n\r\nIf you have a pi zero connected on usb port, unplug it. Boot the oric,\r\nand now insert pi zero into usb port\r\n\r\n[]{#anchor-111}Kernel panic\r\n---------------------------\r\n\r\nWhen kernel can't solve a « free memory kernel call» in a binary, it\r\ncould produce a kernel panic. In that case, you need to do a reset.\r\nThere is a bug in kernel 2021.1 which could produce this error. It will\r\nbe corrected as soon as possible.\r\n\r\n[]{#anchor-112}A folder is displayed on my PC but not under my Oric\r\n-------------------------------------------------------------------\r\n\r\nSometimes sdcard or usbkey has bad format for the usb controler and it\r\ncan reads some content. Format the usb key or sdcard and install again\r\nall files. Or try another usb key/sdcard\r\n\r\n[]{#anchor-113}I have strange behavior when un do csave or cload on basic ROM : It's always the same file event i cload another content\r\n---------------------------------------------------------------------------------------------------------------------------------------\r\n\r\nSometimes sdcard or usbkey has bad format for the usb controler and it\r\ncan reads some content. Format the usb key or sdcard and install again\r\nall files. Or try another usb key/sdcard\r\n\r\n\r\n[]{#anchor-200}Garbage on basic screen\r\n---------------------------------------------------------------------------------------------------------------------------------------\r\n\r\nWhen you start altered charset on screen when you start basic11 command (or funct+T), download again basic.tgz here : http://repo.orix.oric.org/dists/official/tgz/6502/\r\n\r\nuntar and unzip it on your device, it should fix this : the charset.chs file had been altered\r\n\r\n\\newpage\r\n\r\n[]{#anchor-114}Q&A\r\n==================\r\n\r\n[]{#anchor-115}I want to change the current directory\r\n-----------------------------------------------------\r\n\r\nSee « cd » command\r\n\r\n[]{#anchor-116}I want to see which bank are loaded into ROM and RAM\r\n-------------------------------------------------------------------\r\n\r\nSee «bank» section\r\n\r\n[]{#anchor-117}I want to read a .dsk file\r\n-----------------------------------------\r\n\r\nYou can only extract files from a .dsk file (see « dsk-util »)\r\n\r\nIf you have a cumulus board, you can use « bootfd » and connect your\r\ncumulus on expansion board « see how to connect a cumulus section »\r\n\r\n[]{#anchor-118}I can't type anything in basic rom (« basic11 » command)\r\n-----------------------------------------------------------------------\r\n\r\nThere is a firmware bug on some board which generate a false state for\r\nthe third button of a joystick. The easier step to avoid this, is to\r\nconnect a joystick to the left port on the board.\r\n\r\nThe issue can be fixed by upgrading firmware board (it needs to open the\r\nbox and program the firmware with Jtag port)\r\n\r\n\\newpage\r\n\r\n[]{#anchor-119}ANNEXES\r\n======================\r\n\r\n[]{#anchor-120}Firmware version\r\n-------------------------------\r\n\r\n --------- ------------------------------------------------------------- ------------\r\n Version Features Known bugs\r\n 1 RAM/ROM switch, ROM programmation, joysticks, usb controler N/A\r\n 2 Start all sedoric disks from cumulus N/A\r\n --------- ------------------------------------------------------------- ------------\r\n\r\n[]{#anchor-121}Upgrade from older version\r\n-----------------------------------------\r\n\r\nUpgrade from v2022.1 to v2022.3\r\n---------------------------------------------\r\n\r\nIf your orix version is below v2022.1 version, please go to annexes part at the\r\nend of this document, before you try to upgrade to v2022.3\r\n\r\n- Download\r\n <http://repo.orix.oric.org/dists/official/tgz/6502/sdcard.tgz>\r\n- untar/gunzip sdcard.tgz (use 7zip under windows) on your device usb\r\n or sdcard : It could require some time to copy because there is a\r\n lot of small files (tap, hlp etc)\r\n- you can start orix on real machine, and type :\r\n\r\n /\\#cd usr\\\r\n /usr\\#cd share\\\r\n /*usr/share\\#cd carts\\\r\n /usr/share/carts\\#cd 2022.3*\r\n\r\n If you want to usr usb drive for default device :\r\n\r\n */usr/share/carts/2022.3\\#orixcfg -r -s 4 kernelus.r64*\r\n\r\n If you want to use sdcard for default device :\r\n\r\n /usr/share/carts/2022.3\\#orixcfg -r -s 4 kernelsd.r64\r\n\r\n- press 'y', and **wait until Orix reboots **\r\n\r\n (Don't switch off the Oric at this step)\r\n\r\n\r\nUpgrade from v2021.4 to v2022.1\r\n---------------------------------------------\r\n\r\nIf your card is below v2021.4 version, please go to annexes part at the\r\nend of this document, before you try to upgrade to v2022.1\r\n\r\n- Download\r\n <http://repo.orix.oric.org/dists/official/tgz/6502/sdcard.tgz>\r\n- untar/gunzip sdcard.tgz (use 7zip under windows) on your device usb\r\n or sdcard : It could require some time to copy because there is a\r\n lot of small files (tap, hlp etc)\r\n- you can start orix on real machine, and type :\r\n\r\n /\\#cd usr\\\r\n /usr\\#cd share\\\r\n /*usr/share\\#cd carts\\\r\n /usr/share/carts\\#cd 2022.1*\r\n\r\n If you want to usr usb drive for default device :\r\n\r\n */usr/share/carts/2022.1\\#orixcfg -r -s 4 kernelus.r64*\r\n\r\n If you want to use sdcard for default device :\r\n\r\n /usr/share/carts/2022.1\\#orixcfg -r -s 4 kernelsd.r64\r\n\r\n- press 'y', and **wait until Orix reboots **\r\n\r\n (Don't switch off the Oric at this step)\r\n\r\nUpgrade from v2021.3 to v2021.4\r\n-------------------------------\r\n\r\nIf your card is below v2021.3 version, please go to annexes part at the\r\nend of this document, before you try to upgrade to v2021.4\r\n\r\n- Download\r\n <http://repo.orix.oric.org/dists/official/tgz/6502/sdcard.tgz>\r\n- untar/gunzip sdcard.tgz (use 7zip under windows) on your device usb\r\n or sdcard : It could require some time to copy because there is a\r\n lot of small files (tap, hlp etc)\r\n- you can start orix on real machine, and type :\r\n\r\n /\\#cd usr\\\r\n /usr\\#cd share\\\r\n /*usr/share\\#cd carts\\\r\n /usr/share/carts\\#cd 2021.4*\r\n\r\n If you want to usr usb drive for default device :\r\n\r\n */usr/share/carts/2021.4\\#orixcfg -r -s 4 kernelus.r64*\r\n\r\n If you want to use sdcard for default device :\r\n\r\n /usr/share/carts/2021.4\\#orixcfg -r -s 4 kernelsd.r64\r\n\r\n- press 'y', and **wait until Orix reboots **\r\n\r\n (Don't switch off the Oric at this step)\r\n\r\n[]{#anchor-122}Upgrade from v2021.2 to v2021.3\r\n----------------------------------------------\r\n\r\nYou need to unzip/untar orixcfg new version here :\r\n<http://repo.orix.oric.org/dists/2021.3/tgz/6502/orixcfg.tgz>\r\n\r\n- Download\r\n <http://repo.orix.oric.org/dists/official/tgz/6502/sdcard.tgz> or\r\n <http://repo.orix.oric.org/dists/2021.3/tgz/6502/cardridge.tgz>\r\n- untar/gunzip sdcard.tgz (use 7zip under windows) on your device usb\r\n or sdcard : It could require some time to copy because there is a\r\n lot of small files (tap, hlp etc)\r\n- you can start orix on real machine, and type :\r\n\r\n /\\#cd usr\\\r\n /usr\\#cd share\\\r\n /*usr/share\\#cd carts\\\r\n /usr/share/carts\\#cd 2021.3*\r\n\r\n If you want to usr usb drive for default device :\r\n\r\n */usr/share/carts/2021.3\\#orixcfg -r -s 4 kernelus.r64*\r\n\r\n If you want to use sdcard for default device :\r\n\r\n /usr/share/carts/2021.3\\#orixcfg -r -s 4 kernelsd.r64\r\n\r\n- press 'y', and **wait until Orix reboots **\r\n\r\n (Don't switch off the Oric at this step)\r\n\r\n\r\n[]{#anchor-123}Upgrade from v2021.1 to v2021.2\r\n\r\nIf your card is below v2021.1 version, please go to annexes part at the\r\nend of this document, before you try to upgrade to v2021.2\r\n\r\n- Download\r\n <http://repo.orix.oric.org/dists/official/tgz/6502/sdcard.tgz>\r\n- untar/gunzip sdcard.tgz (use 7zip under windows) on your device usb\r\n or sdcard : It could require some time to copy because there is a\r\n lot of small files (tap, hlp etc)\r\n- you can start orix on real machine, and type :\r\n\r\n /\\#cd usr\\\r\n /usr\\#cd share\\\r\n /*usr/share\\#cd carts\\\r\n /usr/share/carts\\#cd 2021.2*\r\n\r\n If you want to usr usb drive for default device :\r\n\r\n */usr/share/carts/2021.2\\#orixcfg -r -s 4 kernelus.r64*\r\n\r\n If you want to use sdcard for default device :\r\n\r\n /usr/share/carts/2021.2\\#orixcfg -r -s 4 kernelsd.r64\r\n\r\n- press 'y', and **wait until Orix reboots **\r\n\r\n (Don't switch off the Oric at this step)\r\n\r\n[]{#anchor-125}From 2020.4 to 2021.1\r\n\r\nDownload : <http://repo.orix.oric.org/dists/2021.1/tgz/6502/carts.zip>\r\n\r\nUnzip it on your device (sdcard/usbkey)\r\n\r\n- you can start orix on real machine, and type :\r\n\r\n /\\#cd usr\\\r\n /usr\\#cd share\\\r\n /*usr/share\\#cd carts\\\r\n /usr/share/carts\\#cd 2021.1*\r\n\r\n If you want to usr usb drive for default device :\r\n\r\n */usr/share/carts/2021.1\\#orixcfg -r -s 4 kernelus.r64*\r\n\r\n If you want to use sdcard for default device :\r\n\r\n /usr/share/carts/2021.1\\#orixcfg -r -s 4 kernelsd.r64\r\n\r\n- press 'y', and **wait until Orix reboots **\r\n\r\n (Don't switch off the Oric at this step)\r\n\r\n\r\n\r\n[]{#anchor-200}Optionnal step for upgrade\r\n-----------------------------------------\r\n\r\nNow bank displays all banks from l to 64. It means that you should have\r\nsome strange bank signature for eeprom. Now an empty set is provided in\r\n*/usr/share/carts/2021.3* folder. With Orixcfg you can initialize your\r\nset with this cart. Don't use « -s 4 » flag for orixcfg when you want to\r\nload emptyset."
},
{
"alpha_fraction": 0.5080500841140747,
"alphanum_fraction": 0.5563506484031677,
"avg_line_length": 15.939393997192383,
"blob_id": "b3a5908b2c767d1a53a5929c03233c4c76bd110e",
"content_id": "25e24656d2632784e8442dee170a669ac1b82697",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Markdown",
"length_bytes": 559,
"license_type": "no_license",
"max_line_length": 68,
"num_lines": 33,
"path": "/docs/kernel/primitives/xbindx.md",
"repo_name": "orix-software/orix-software.github.io",
"src_encoding": "UTF-8",
"text": "# XBINDX (16 bit number to decimal)\n\n## Description\n\nconvert A & Y number into decimal string\n\n## Input\n\nA & Y the 16 bits value\n\nTR5 the pointer of the address to put decimal number (String)\n\nDEFAFF : space before the number\n\n## Example 1 : Displays value AY (16 bits number) in $bb80 text mode\n\n```ca65\n .include \"telestrat.inc\"\n\n lda #<$bb80\n sta TR5\n lda #>$bb80\n sta TR5+1\n\n lda #$20\n sta DEFAFF\n\n ldx #$01\n ldy #$00\n lda #$10\n BRK_KERNEL XBINDX\n rts\n```\n"
},
{
"alpha_fraction": 0.5437665581703186,
"alphanum_fraction": 0.5596817135810852,
"avg_line_length": 15.534883499145508,
"blob_id": "dc481d8f3f8d36a8bbdc4deb49654d65d4da192f",
"content_id": "e8d328c1c6e9564e95d195f48c2087143b0dc2bd",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Markdown",
"length_bytes": 754,
"license_type": "no_license",
"max_line_length": 70,
"num_lines": 43,
"path": "/docs/developer_manual/orixsdk_macros/getmainargs.md",
"repo_name": "orix-software/orix-software.github.io",
"src_encoding": "UTF-8",
"text": "# Getmainargs\r\n\r\n## Description\r\n\r\nGet arg\r\n\r\n## Usage\r\n\r\nGet main args : get arg value\r\n\r\n## Example\r\n\r\n```ca65\r\n .include \"telestrat.inc\"\r\n .include \"../orix-sdk/macros/SDK_mainargs.mac\"\r\n\r\n XMAINARGS = $2C\r\n XGETARGV = $2E\r\n\r\n argv := userzp ; 2 bytes\r\n argc := userzp+2 ; 1 byte\r\n\r\n ; Init struct\r\n initmainargs argv, argc, 0\r\n\r\n getmainarg #1, (argv)\r\n\r\n ; Save the ptr of the parameter\r\n sta argv\r\n sty argv+1\r\n\r\n BRK_TELEMON XWSTR0 ; Displays the arg 1 from the command line\r\n rts\r\n```\r\n\r\nResult (argbin) is binary in assembly with a parameter \"foo\"\r\n\r\n```bash\r\n/#argbin foo\r\nfoo\r\n```\r\n\r\nCall [XGETARGV](../../../kernel/primitives/xgetargv/) kernel function.\r\n"
},
{
"alpha_fraction": 0.7298578023910522,
"alphanum_fraction": 0.7298578023910522,
"avg_line_length": 11.411765098571777,
"blob_id": "b8559ee65aa261bb8a220de8727e1d143b4104b2",
"content_id": "c31d43d7ce71932722de88552c80dcd6ffcc5e8a",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Markdown",
"length_bytes": 211,
"license_type": "no_license",
"max_line_length": 71,
"num_lines": 17,
"path": "/docs/commands/cat.md",
"repo_name": "orix-software/orix-software.github.io",
"src_encoding": "UTF-8",
"text": "# cat\n\n## Introduction\n\n Display a file to the screen\n\n## SYNOPSYS\n\n+ cat FILENAME\n\n## NOTES\n\nDisplays content of the FILENAME\n\n## SOURCE\n\nhttps://github.com/orix-software/shell/blob/master/src/commands/cat.asm\n"
},
{
"alpha_fraction": 0.6353839039802551,
"alphanum_fraction": 0.6457117199897766,
"avg_line_length": 39.45454406738281,
"blob_id": "e3b9d3a7763c1a443d41e6ae07d5edc4e1e7abb8",
"content_id": "7f37ad59d1f09fea6de41a0198b7faccaaeed23e",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Shell",
"length_bytes": 2227,
"license_type": "no_license",
"max_line_length": 518,
"num_lines": 55,
"path": "/launch.sh",
"repo_name": "orix-software/orix-software.github.io",
"src_encoding": "UTF-8",
"text": "#! /bin/bash\n\ndeclare -a tab_command\ndeclare -a tab_repo\n\ntab_command=(\"asm2k2\" \"bank\" \"basic10\" \"basic11\" \"barboric\" \"blakes7\" \"bootfd\" \"born1983\" \"cat\" \"cd\" \"cksum\" \"clear\" \"cp\" \"df\" \"otimer\" \"dsk-util\" \"echo\" \"env\" \"forth\" \"ftdos\" \"grep\" \"help\" \"hexdump\" \"ioports\" \"list\" \"ls\" \"lscpu\" \"lsmem\" \"loader\" \"man\" \"mkdir\" \"mount\" \"more\" \"orixcfg\" \"pwd\" \"ps\" \"quintes\" \"raw2dsk\" \"readdsk\" \"reboot\" \"rm\" \"setfont\" \"loader\" \"twilconf\" \"strerr\" \"sh\" \"submit\" \"touch\" \"twil\" \"twiload\" \"uname\" \"untar\" \"vidplay\" \"viewscr\" \"viewhrs\" \"zerofx\")\ntab_repo=( \"asm2K2\" \"shell\" \"shell\" \"shell\" \"barboric\" \"blakes7\" \"bootfd\" \"born1983\" \"shell\" \"shell\" \"cksum\" \"shell\" \"shell\" \"shell\" \"shell\" \"dsk-util\" \"shell\" \"shell\" \"forth\" \"ftdos\" \"grep\" \"shell\" \"hexdump\" \"shell\" \"list\" \"shell\" \"shell\" \"shell\" \"systemd\" \"shell\" \"shell\" \"shell\" \"more\" \"orixcfg\" \"shell\" \"shell\" \"quintessential\" \"raw2dsk\" \"readdsk\" \"shell\" \"shell\" \"shell\" \"systemd\" \"systemd\" \"strerr\" \"shell\" \"submit\" \"shell\" \"shell\" \"systemd\" \"shell\" \"untar\" \"vidplay\" \"viewscr\" \"shell\" \"zerofx\")\n\nif [ -z $TOKEN_GITHUB_PRIVATE_REPO ]; then\necho Missing TOKEN_GITHUB_PRIVATE_REPO impossible to get private repo\nelse\nGITHUB_AUTH=$TOKEN_GITHUB_PRIVATE_REPO@\necho no\nfi\n\nif [ \"$1\" == \"--pdf\" ]; then\ncd pandoc/ && ./build.sh\nfi\n\nif [ \"$1\" == \"--full\" ]; then\nmkdir docs/commands/ -p\n\nrm docs/commands/all.md\nrm docs/commands/commands_for_nav.md\n\nfor i in ${!tab_command[@]}; do\nVAL=${tab_command[$i]}\n\necho ${tab_repo[$i]}\n\n\ncurl https://github.com/orix-software/pbar-lib/blob/main/docs/api/pbar-lib.md -o docs/libs/\n\nMYURL=\"https://${GITHUB_AUTH}raw.githubusercontent.com/orix-software/${tab_repo[$i]}/master/docs/$VAL.md -o docs/commands/$VAL.md\"\n\necho $MYURL\ncurl $MYURL\nret=$?\n\nif [ $ret -ne 0 ]; then\nexit\nfi\n\nMYURLVERSION=\"https://${GITHUB_AUTH}raw.githubusercontent.com/orix-software/${tab_repo[$i]}/master/VERSION -o VERSION\"\n\ncurl $MYURLVERSION\n\nMYVERSION=`cat VERSION`\n\necho \"* [$VAL](../$VAL) Last version : $MYVERSION\" >> docs/commands/all.md\necho \"- $VAL: commands/$VAL.md\" >> docs/commands/commands_for_nav.md\ndone\nfi\n\nmkdocs build && cp site/* . -r && git add * && git commit -m update && echo push && git push\n\n\n"
},
{
"alpha_fraction": 0.2975206673145294,
"alphanum_fraction": 0.44628098607063293,
"avg_line_length": 6.5625,
"blob_id": "4b4b48966df232c866c2a92ed2b93b97c8134db5",
"content_id": "65b0a5e787b5e208000f4d88d0967cd783ece8b6",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Markdown",
"length_bytes": 121,
"license_type": "no_license",
"max_line_length": 39,
"num_lines": 16,
"path": "/doxygen/doxybook_output_vi/index_namespaces.md",
"repo_name": "orix-software/orix-software.github.io",
"src_encoding": "UTF-8",
"text": "---\ntitle: Namespaces\n\n---\n\n# Namespaces\n\n\n\n\n\n\n\n-------------------------------\n\nUpdated on 2022-12-15 at 14:20:17 +0100\n"
},
{
"alpha_fraction": 0.8360655903816223,
"alphanum_fraction": 0.8360655903816223,
"avg_line_length": 39.66666793823242,
"blob_id": "7b82ccd846b2eef274022872c05b3922c3d01a0c",
"content_id": "e7533a95c6d1c751bf3e07af1299a905aa6a90ec",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Markdown",
"length_bytes": 122,
"license_type": "no_license",
"max_line_length": 97,
"num_lines": 3,
"path": "/docs/developer_manual/joysticks.md",
"repo_name": "orix-software/orix-software.github.io",
"src_encoding": "UTF-8",
"text": "# Joysticks management\n\nJoystick supports are available in oricutron. Joysticks works exactly than on telestrat computer.\n"
},
{
"alpha_fraction": 0.41436463594436646,
"alphanum_fraction": 0.5138121843338013,
"avg_line_length": 9.647058486938477,
"blob_id": "64b313743b3f1d321d6a0fd724db1344fc8d6878",
"content_id": "a644759de5728e91f21a96a4843338c754492cb5",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Markdown",
"length_bytes": 181,
"license_type": "no_license",
"max_line_length": 65,
"num_lines": 17,
"path": "/doxygen/doxybook_output_vi/index_classes.md",
"repo_name": "orix-software/orix-software.github.io",
"src_encoding": "UTF-8",
"text": "---\ntitle: Classes\n\n---\n\n# Classes\n\n\n\n\n* **struct [vi_struct_data](Classes/structvi__struct__data.md)** \n\n\n\n-------------------------------\n\nUpdated on 2022-12-15 at 12:21:07 +0100\n"
},
{
"alpha_fraction": 0.7327044010162354,
"alphanum_fraction": 0.7358490824699402,
"avg_line_length": 21.714284896850586,
"blob_id": "c6638cd1f909b235d9fbcf75c0ef64b9025bee97",
"content_id": "4cee994c594024ad4f884096773bd41e5f3d0910",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Markdown",
"length_bytes": 318,
"license_type": "no_license",
"max_line_length": 99,
"num_lines": 14,
"path": "/docs/commands/readdsk.md",
"repo_name": "orix-software/orix-software.github.io",
"src_encoding": "UTF-8",
"text": "# Readdsk\n\nDump a real floppy disk. It needs a real floppy drive controler to be attached with twilighte board\n\n## Usage\n\nreaddsk myfile.raw\n\n* -v : verbose\n* -vv : more verbose, displays sectors content\n* -h : this help\n* -V : displays version\n\nreaddsk produces a rawfile. raw2dsk must be launch to build a .dsk file\n"
},
{
"alpha_fraction": 0.5901639461517334,
"alphanum_fraction": 0.6065573692321777,
"avg_line_length": 18.33333396911621,
"blob_id": "65ec9f4d8b68e70cb03c60239071d1dae4f59ca5",
"content_id": "730dc777d9f7ab9d2b650965ef73d413ffbe7039",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Markdown",
"length_bytes": 366,
"license_type": "no_license",
"max_line_length": 98,
"num_lines": 18,
"path": "/docs/kernel/primitives/xscrob.md",
"repo_name": "orix-software/orix-software.github.io",
"src_encoding": "UTF-8",
"text": "# XSCROB (Scroll down)\r\n\r\n## Description\r\n\r\nScroll all lines from the top to the bottom\r\n\r\n## Example\r\n\r\n```ca65\r\n .include \"telestrat.inc\"\r\n\r\n ldx #$01 ; First line to scroll\r\n ldy #25 ; Last line to scroll\r\n BRK_KERNEL XSCROB\r\n rts\r\n```\r\n\r\n!!! tip \"See [scroll](../../developer_manual/orixsdk_macros/scroll) macro from orix-sdk to use it\"\r\n"
},
{
"alpha_fraction": 0.7166666388511658,
"alphanum_fraction": 0.7166666388511658,
"avg_line_length": 13.285714149475098,
"blob_id": "41ce2853b71efa5285b619c1e1ebc15183371ba9",
"content_id": "21681993c81621bfc6032c34b9e0aeb583006dc7",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Markdown",
"length_bytes": 300,
"license_type": "no_license",
"max_line_length": 106,
"num_lines": 21,
"path": "/docs/commands/rm.md",
"repo_name": "orix-software/orix-software.github.io",
"src_encoding": "UTF-8",
"text": "# rm\n\n## Introduction\n\nremove file\n\n## SYNOPSYS\n\n+ /# rm /myfile\n\n## DESCRIPTION\n\nrm file or directory. For instance, rm can only remove file in root folder, relative arg does not work yet\n\n## EXAMPLES\n\n+ rm /myffile\n\n## SOURCE\n\nhttps://github.com/orix-software/shell/blob/master/src/commands/rm.asm\n"
},
{
"alpha_fraction": 0.5,
"alphanum_fraction": 0.5154777765274048,
"avg_line_length": 13.288461685180664,
"blob_id": "92997f3b059ff8c04ca832d0587f2fcfb8a65360",
"content_id": "efafde3ef19bab87434f802f1a506c7adf5e0181",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Markdown",
"length_bytes": 1486,
"license_type": "no_license",
"max_line_length": 114,
"num_lines": 104,
"path": "/docs/tools_docs/vi/Files/vi__switch__to__edition__mode_8s.md",
"repo_name": "orix-software/orix-software.github.io",
"src_encoding": "UTF-8",
"text": "---\ntitle: /Routines/vi_switch_to_edition_mode.s\n\n---\n\n# /Routines/vi_switch_to_edition_mode.s\n\n\n\n## Routine\n\n| | Name |\n| -------------- | -------------- |\n| | **[vi_switch_to_edition_mode](Files/vi__switch__to__edition__mode_8s.md#Routine-vi-switch-to-edition-mode)** |\n\n\n## Routine documentation\n\n### Routine vi_switch_to_edition_mode\n\n```ca65\nvi_switch_to_edition_mode\n```\n\n\n\n\n## Source code\n\n```ca65\n.proc vi_switch_to_edition_mode\n\n@loop:\n ; Display debug\n ldy vi_pos_debug\n jsr displays_debug\n ; end display debug\n\n jsr vi_compute_video_adress\n jsr vi_editor_switch_on_cursor\n cgetc\n\n cmp #KEY_ESC\n beq @exit\n\n cmp #KEY_UP\n beq @key_up\n\n cmp #KEY_DEL\n beq @key_del\n\n cmp #KEY_RETURN\n beq @key_enter\n\n cmp #KEY_DOWN\n beq @key_down\n\n cmp #KEY_RIGHT\n beq @key_right\n\n cmp #KEY_LEFT\n beq @key_left\n\n jsr @display_char\n jmp @loop\n\n@exit:\n rts\n\n@key_right:\n jsr vi_key_right\n jmp @loop\n\n@key_left:\n jsr vi_key_left\n jmp @loop\n\n@key_up:\n jsr vi_key_up\n jmp @loop\n\n@key_del:\n jsr vi_manage_del\n jmp @loop\n\n@key_enter:\n jsr vi_key_enter\n jmp @loop\n\n@key_down:\n jsr vi_key_down\n jmp @loop\n\n@display_char:\n jsr vi_display_char\n\n rts\n.endproc\n```\n\n\n-------------------------------\n\nUpdated on 2022-12-15 at 14:20:17 +0100\n"
},
{
"alpha_fraction": 0.6177369952201843,
"alphanum_fraction": 0.6299694180488586,
"avg_line_length": 15.210526466369629,
"blob_id": "3448521e85d7c0dcbc00c6a5a839ba0f78cbe869",
"content_id": "1f9671abdb9b09bc1ab3e822d28cf8781bfd18ec",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Markdown",
"length_bytes": 327,
"license_type": "no_license",
"max_line_length": 68,
"num_lines": 19,
"path": "/docs/kernel/primitives/xrd0.md",
"repo_name": "orix-software/orix-software.github.io",
"src_encoding": "UTF-8",
"text": "# XRD0\r\n\r\n## Usage\r\n\r\nCheck if a key is pressed\r\n\r\n## Example\r\n\r\n```ca65\r\n .include \"telestrat.inc\"\r\n\r\n BRK_TELEMON XRD0 ; primitive exits even if no key had been pressed\r\n bcs @no_key_pressed\r\n ; When a key is pressed, A contains the ascii of the value\r\n@here_a_key_is_pressed:\r\n rts\r\n@no_key_pressed:\r\n rts\r\n```\r\n"
},
{
"alpha_fraction": 0.465753436088562,
"alphanum_fraction": 0.5018680095672607,
"avg_line_length": 13.600000381469727,
"blob_id": "b257ad84171aed57fa993961fa8c2f326e0f7be0",
"content_id": "3d804f766f5f4697e2fcf7722d75cc42632a264f",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Markdown",
"length_bytes": 803,
"license_type": "no_license",
"max_line_length": 118,
"num_lines": 55,
"path": "/doxygen/doxybook_output_vi/Files/vi__ptr__last__char__plus__plus_8s.md",
"repo_name": "orix-software/orix-software.github.io",
"src_encoding": "UTF-8",
"text": "---\ntitle: vi_ptr_last_char_plus_plus.s\n\n---\n\n# vi_ptr_last_char_plus_plus.s\n\n\n\n## Routine\n\n| | Name |\n| -------------- | -------------- |\n| | **[vi_ptr_last_char_plus_plus](Files/vi__ptr__last__char__plus__plus_8s.md#Routine-vi-ptr-last-char-plus-plus)** |\n\n\n## Routine documentation\n\n### Routine vi_ptr_last_char_plus_plus\n\n```ca65\nvi_ptr_last_char_plus_plus\n```\n\n\n\n\n## Source code\n\n```ca65\n.proc vi_ptr_last_char_plus_plus\n\n ldy #vi_struct_data::ptr_last_char_file\n lda (vi_struct),y\n clc\n adc #$01\n bcc @S1\n pha\n iny\n lda (vi_struct),y\n clc\n adc #$01\n sta (vi_struct),y\n dey\n pla\n@S1:\n sta (vi_struct),y\n rts\n.endproc\n```\n\n\n-------------------------------\n\nUpdated on 2022-12-15 at 14:20:17 +0100\n"
},
{
"alpha_fraction": 0.5419968366622925,
"alphanum_fraction": 0.5729001760482788,
"avg_line_length": 19.03174591064453,
"blob_id": "bd1745d820ccebceb867529a0d8e1f6f56269463",
"content_id": "aa397ddf6646c3a83cd1347da345010c956d5c82",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Markdown",
"length_bytes": 1262,
"license_type": "no_license",
"max_line_length": 134,
"num_lines": 63,
"path": "/doxygen/doxybook_output/Files/vi__copy__arg1__to__name__file__open_8s.md",
"repo_name": "orix-software/orix-software.github.io",
"src_encoding": "UTF-8",
"text": "---\ntitle: /mnt/c/Users/plifp/OneDrive/oric/projets/orix-software/vi/src/functions/subfunc/vi/vi_copy_arg1_to_name_file_open.s\n\n---\n\n# /mnt/c/Users/plifp/OneDrive/oric/projets/orix-software/vi/src/functions/subfunc/vi/vi_copy_arg1_to_name_file_open.s\n\n\n\n## Functions\n\n| | Name |\n| -------------- | -------------- |\n| | **[vi_copy_arg1_to_name_file_open](Files/vi__copy__arg1__to__name__file__open_8s.md#function-vi-copy-arg1-to-name-file-open)**() |\n\n\n## Functions Documentation\n\n### function vi_copy_arg1_to_name_file_open\n\n```cpp\nvi_copy_arg1_to_name_file_open()\n```\n\n\n\n\n## Source code\n\n```cpp\n.proc vi_copy_arg1_to_name_file_open\n ldy #$00\n sty vi_tmp1\n\n ldy #vi_struct_data::name_file_open\n sty vi_tmp2\n\n@loop_copy_filename:\n ldy vi_tmp1\n lda (vi_arg1),y\n beq @exit_copy_filename\n iny\n sty vi_tmp1\n ldy vi_tmp2\n sta (vi_struct),y\n iny\n sty vi_tmp2\n cpy #VI_MAX_LENGTH_FILENAME+vi_struct_data::name_file_open\n bne @loop_copy_filename\n\n lda #$00 ; EOS if overflow\n@exit_copy_filename:\n\n ldy vi_tmp2\n sta (vi_struct),y\n rts\n.endproc\n```\n\n\n-------------------------------\n\nUpdated on 2022-12-15 at 11:48:27 +0100\n"
},
{
"alpha_fraction": 0.5816501975059509,
"alphanum_fraction": 0.610081136226654,
"avg_line_length": 22.846044540405273,
"blob_id": "1fec10b12fe59735e0a9fcceb5761889bdc3e2df",
"content_id": "adbbfcc816cee8aba3a195ba1439670a499515b5",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Markdown",
"length_bytes": 16899,
"license_type": "no_license",
"max_line_length": 175,
"num_lines": 708,
"path": "/docs/tools_docs/vi/Files/vi_8s.md",
"repo_name": "orix-software/orix-software.github.io",
"src_encoding": "UTF-8",
"text": "---\ntitle: /commands/vi.s\n\n---\n\n# /commands/vi.s\n\n\n\n## Routine\n\n| | Name |\n| -------------- | -------------- |\n| | **[_vi](Files/vi_8s.md#Routine--vi)** |\n\n## Attributes\n\n| | Name |\n| -------------- | -------------- |\n| const char | **[CR](Files/vi_8s.md#variable-cr)** |\n| const char | **[LF](Files/vi_8s.md#variable-lf)** |\n| const char | **[IS_EOF](Files/vi_8s.md#variable-is-eof)** |\n| const char | **[IS_BEGINNING_OF_THE_FILE](Files/vi_8s.md#variable-is-beginning-of-the-file)** |\n| const char | **[IS_LAST_LINE_OF_SCREEN_TEXT](Files/vi_8s.md#variable-is-last-line-of-screen-text)** |\n| const char | **[IS_BEGINNING_OF_THE_LINE](Files/vi_8s.md#variable-is-beginning-of-the-line)** |\n| const char | **[VI_LAST_LINE_EDITOR](Files/vi_8s.md#variable-vi-last-line-editor)** <br>Last line used by the editor. |\n| const char | **[VI_FILL_SCREEN_MODE_STOP_AT_THE_END_OF_LAST_LINE](Files/vi_8s.md#variable-vi-fill-screen-mode-stop-at-the-end-of-last-line)** |\n| const char | **[VI_COMMANDLINE_MAX_CHAR](Files/vi_8s.md#variable-vi-commandline-max-char)** |\n| const char | **[VI_MAX_LENGTH_FILE](Files/vi_8s.md#variable-vi-max-length-file)** |\n| const char | **[VI_EDITOR_CHAR_LIMITS_EMPTY](Files/vi_8s.md#variable-vi-editor-char-limits-empty)** |\n| const char | **[VI_COMMANDLINE_VIDEO_ADRESS](Files/vi_8s.md#variable-vi-commandline-video-adress)** |\n| const char | **[VI_EDITION_LAST_VIDEO_ADRESS](Files/vi_8s.md#variable-vi-edition-last-video-adress)** |\n| const char | **[VI_EDITION_VIDEO_ADRESS](Files/vi_8s.md#variable-vi-edition-video-adress)** |\n| const char | **[VI_EDITOR_MAX_LENGTH_OF_A_LINE](Files/vi_8s.md#variable-vi-editor-max-length-of-a-line)** |\n| const char | **[VI_EDITOR_MAX_COLUMN](Files/vi_8s.md#variable-vi-editor-max-column)** |\n| const char | **[vi_struct](Files/vi_8s.md#variable-vi-struct)** <br>2 bytes |\n| const char | **[vi_ptr1](Files/vi_8s.md#variable-vi-ptr1)** <br>2 bytes |\n| const char | **[vi_argv](Files/vi_8s.md#variable-vi-argv)** <br>2 bytes |\n| const char | **[vi_argc](Files/vi_8s.md#variable-vi-argc)** <br>1 bytes |\n| const char | **[vi_arg1](Files/vi_8s.md#variable-vi-arg1)** <br>2 bytes |\n| const char | **[vi_fp](Files/vi_8s.md#variable-vi-fp)** <br>2 bytes |\n| const char | **[vi_ptr_file](Files/vi_8s.md#variable-vi-ptr-file)** <br>2 bytes |\n| const char | **[vi_tmp2](Files/vi_8s.md#variable-vi-tmp2)** |\n| const char | **[vi_ptr_screen](Files/vi_8s.md#variable-vi-ptr-screen)** <br>2 bytes |\n| const char | **[vi_length_file](Files/vi_8s.md#variable-vi-length-file)** <br>2 bytes |\n| const char | **[vi_length_file_compute](Files/vi_8s.md#variable-vi-length-file-compute)** <br>2 bytes |\n| const char | **[vi_tmp1](Files/vi_8s.md#variable-vi-tmp1)** |\n| const char | **[vi_fileopened](Files/vi_8s.md#variable-vi-fileopened)** |\n| const char | **[vi_ptr_file_used](Files/vi_8s.md#variable-vi-ptr-file-used)** <br>2 bytes |\n| const char | **[tmp0_16](Files/vi_8s.md#variable-tmp0-16)** |\n| const char | **[vi_no_opened_file](Files/vi_8s.md#variable-vi-no-opened-file)** |\n| const char | **[vi_pos_debug](Files/vi_8s.md#variable-vi-pos-debug)** |\n| const char | **[vi_ptr2](Files/vi_8s.md#variable-vi-ptr2)** <br>2 bytes |\n| const char | **[vi_savex](Files/vi_8s.md#variable-vi-savex)** <br>2 bytes |\n| const char | **[vi_ptr3](Files/vi_8s.md#variable-vi-ptr3)** <br>2 bytes |\n| const char | **[vi_tmp3](Files/vi_8s.md#variable-vi-tmp3)** <br>2 bytes |\n\n## Defines\n\n| | Name |\n| -------------- | -------------- |\n| | **[vi_dec16_zp](Files/vi_8s.md#define-vi-dec16-zp)**(addr) |\n\n\n## Routine documentation\n\n### Routine _vi\n\n```ca65\n_vi\n```\n\n\n\n## Attributes Documentation\n\n### variable CR\n\n```ca65\nconst char CR = $0D;\n```\n\n\n### variable LF\n\n```ca65\nconst char LF = $0A;\n```\n\n\n### variable IS_EOF\n\n```ca65\nconst char IS_EOF = $00;\n```\n\n\n### variable IS_BEGINNING_OF_THE_FILE\n\n```ca65\nconst char IS_BEGINNING_OF_THE_FILE = $00;\n```\n\n\n### variable IS_LAST_LINE_OF_SCREEN_TEXT\n\n```ca65\nconst char IS_LAST_LINE_OF_SCREEN_TEXT = $01;\n```\n\n\n### variable IS_BEGINNING_OF_THE_LINE\n\n```ca65\nconst char IS_BEGINNING_OF_THE_LINE = $00;\n```\n\n\n### variable VI_LAST_LINE_EDITOR\n\n```ca65\nconst char VI_LAST_LINE_EDITOR = 26;\n```\n\nLast line used by the editor. \n\n### variable VI_FILL_SCREEN_MODE_STOP_AT_THE_END_OF_LAST_LINE\n\n```ca65\nconst char VI_FILL_SCREEN_MODE_STOP_AT_THE_END_OF_LAST_LINE = 0;\n```\n\n\n### variable VI_COMMANDLINE_MAX_CHAR\n\n```ca65\nconst char VI_COMMANDLINE_MAX_CHAR = 8;\n```\n\n\n### variable VI_MAX_LENGTH_FILE\n\n```ca65\nconst char VI_MAX_LENGTH_FILE = 2000;\n```\n\n\n### variable VI_EDITOR_CHAR_LIMITS_EMPTY\n\n```ca65\nconst char VI_EDITOR_CHAR_LIMITS_EMPTY = '~';\n```\n\n\n### variable VI_COMMANDLINE_VIDEO_ADRESS\n\n```ca65\nconst char VI_COMMANDLINE_VIDEO_ADRESS = $bb80+40*27;\n```\n\n\n### variable VI_EDITION_LAST_VIDEO_ADRESS\n\n```ca65\nconst char VI_EDITION_LAST_VIDEO_ADRESS = $bb80+40*26;\n```\n\n\n### variable VI_EDITION_VIDEO_ADRESS\n\n```ca65\nconst char VI_EDITION_VIDEO_ADRESS = $bb80;\n```\n\n\n### variable VI_EDITOR_MAX_LENGTH_OF_A_LINE\n\n```ca65\nconst char VI_EDITOR_MAX_LENGTH_OF_A_LINE = 255;\n```\n\n\n### variable VI_EDITOR_MAX_COLUMN\n\n```ca65\nconst char VI_EDITOR_MAX_COLUMN = 39;\n```\n\n\n### variable vi_struct\n\n```ca65\nconst char vi_struct = userzp;\n```\n\n2 bytes \n\n### variable vi_ptr1\n\n```ca65\nconst char vi_ptr1 = userzp+2;\n```\n\n2 bytes \n\n### variable vi_argv\n\n```ca65\nconst char vi_argv = userzp+4;\n```\n\n2 bytes \n\n### variable vi_argc\n\n```ca65\nconst char vi_argc = userzp+6;\n```\n\n1 bytes \n\n### variable vi_arg1\n\n```ca65\nconst char vi_arg1 = userzp+7;\n```\n\n2 bytes \n\n### variable vi_fp\n\n```ca65\nconst char vi_fp = userzp+9;\n```\n\n2 bytes \n\n### variable vi_ptr_file\n\n```ca65\nconst char vi_ptr_file = userzp+11;\n```\n\n2 bytes \n\n### variable vi_tmp2\n\n```ca65\nconst char vi_tmp2 = userzp+13;\n```\n\n\n### variable vi_ptr_screen\n\n```ca65\nconst char vi_ptr_screen = userzp+14;\n```\n\n2 bytes \n\n### variable vi_length_file\n\n```ca65\nconst char vi_length_file = userzp+16;\n```\n\n2 bytes \n\n### variable vi_length_file_compute\n\n```ca65\nconst char vi_length_file_compute = userzp+18;\n```\n\n2 bytes \n\n### variable vi_tmp1\n\n```ca65\nconst char vi_tmp1 = userzp+20;\n```\n\n\n### variable vi_fileopened\n\n```ca65\nconst char vi_fileopened = userzp+22;\n```\n\n\n### variable vi_ptr_file_used\n\n```ca65\nconst char vi_ptr_file_used = userzp+24;\n```\n\n2 bytes \n\n### variable tmp0_16\n\n```ca65\nconst char tmp0_16 = userzp+26;\n```\n\n\n### variable vi_no_opened_file\n\n```ca65\nconst char vi_no_opened_file = userzp+28;\n```\n\n\n### variable vi_pos_debug\n\n```ca65\nconst char vi_pos_debug = userzp+30;\n```\n\n\n### variable vi_ptr2\n\n```ca65\nconst char vi_ptr2 = userzp+32;\n```\n\n2 bytes \n\n### variable vi_savex\n\n```ca65\nconst char vi_savex = userzp+34;\n```\n\n2 bytes \n\n### variable vi_ptr3\n\n```ca65\nconst char vi_ptr3 = userzp+36;\n```\n\n2 bytes \n\n### variable vi_tmp3\n\n```ca65\nconst char vi_tmp3 = userzp+38;\n```\n\n2 bytes \n\n\n## Macros Documentation\n\n### define vi_dec16_zp\n\n```ca65\n#define vi_dec16_zp(\n addr\n)\n\n```\n\n\n## Source code\n\n```ca65\n; Limits max line : 256 bytes\n; Max file : VI_MAX_LENGTH_FILE (2000 bytes)\n; max edit char per line : 39\n\n; Max 2KB pour un fichier (arbitraire)\n; Pas de possibilité d'éditer une ligne de plus de 39 chars (donc pas de gestion de retour à la ligne quand une ligne fait plus de 40 chars\n; Quand on remonte ou quand on descend le curseur, il revient toujours à la position X=0 même si on était en milieu de ligne\n; Pas de gestion du mode écrasement\n; Pas de possibilité d'avoir une touche pour aller en fin de ligne\n; gère le mode windows pour les retours à la ligne, et ne marchera pas en retour chariot unix\n\n\n.macro vi_dec16_zp addr\n\n lda addr ; 98\n bne *+2 ; go to label\n dec addr+1\n;label:\n dec addr\n.endmacro\n\nCR = $0D\nLF = $0A\n\nIS_EOF = $00\nIS_BEGINNING_OF_THE_FILE = $00\nIS_LAST_LINE_OF_SCREEN_TEXT = $01\nIS_BEGINNING_OF_THE_LINE = $00\n\n\nVI_LAST_LINE_EDITOR = 26 ; Last line used by the editor\nVI_FILL_SCREEN_MODE_STOP_AT_THE_END_OF_LAST_LINE = 0\nVI_COMMANDLINE_MAX_CHAR = 8\nVI_MAX_LENGTH_FILE = 2000\nVI_EDITOR_CHAR_LIMITS_EMPTY = '~'\nVI_COMMANDLINE_VIDEO_ADRESS := $bb80+40*27\nVI_EDITION_LAST_VIDEO_ADRESS := $bb80+40*26\nVI_EDITION_VIDEO_ADRESS := $bb80\nVI_EDITOR_MAX_LENGTH_OF_A_LINE = 255\nVI_EDITOR_MAX_COLUMN = 39\n\n\n\n;* labels prefixed with _ are populated from C*/\n vi_struct := userzp ; 2 bytes\n vi_ptr1 := userzp+2 ; 2 bytes\n vi_argv := userzp+4 ; 2 bytes\n vi_argc := userzp+6 ; 1 bytes\n vi_arg1 := userzp+7 ; 2 bytes\n vi_fp := userzp+9 ; 2 bytes\n vi_ptr_file := userzp+11 ; 2 bytes\n vi_tmp2 := userzp+13\n vi_ptr_screen := userzp+14 ; 2 bytes\n vi_length_file := userzp+16 ; 2 bytes\n vi_length_file_compute := userzp+18 ; 2 bytes\n vi_tmp1 := userzp+20\n vi_fileopened := userzp+22\n vi_ptr_file_used := userzp+24 ; 2 bytes\n tmp0_16 := userzp+26\n vi_no_opened_file := userzp+28\n vi_pos_debug := userzp+30\n vi_ptr2 := userzp+32 ; 2 bytes\n vi_savex := userzp+34 ; 2 bytes\n vi_ptr3 := userzp+36 ; 2 bytes\n vi_tmp3 := userzp+38 ; 2 bytes\n\n\n\n\n.include \"data/vi/strings.s\"\n.include \"Routines/vi_fill_screen_with_empty_line.s\"\n\n.include \"Routines/vi_struct.s\"\n.include \"Routines/vi_displays_info.s\"\n\n.include \"Routines/subfunc/vi/vi_init_vi_struct.s\"\n\n.include \"Routines/subfunc/vi/vi_ptr_last_char_plus_plus.s\"\n.include \"Routines/subfunc/vi/vi_ptr_last_char_sub_sub.s\"\n.include \"Routines/subfunc/vi/vi_set_ptr_last_char.s\"\n.include \"Routines/subfunc/vi/vi_ptr_last_char_add.s\"\n.include \"Routines/subfunc/vi/vi_vi_ptr_file_used_plus_plus.s\"\n\n.include \"Routines/subfunc/vi/vi_xpos_screen_plus_plus.s\"\n.include \"Routines/subfunc/vi/vi_xpos_screen_sub_sub.s\"\n\n.include \"Routines/subfunc/vi/vi_ypos_screen_sub_sub.s\"\n.include \"Routines/subfunc/vi/vi_ypos_screen_plus_plus.s\"\n\n.include \"Routines/subfunc/vi/vi_ptr_file_used_plus_plus.s\"\n.include \"Routines/subfunc/vi/vi_length_file_sub_sub.s\"\n.include \"Routines/subfunc/vi/vi_display_char.s\"\n.include \"Routines/subfunc/vi/vi_check_beginning_of_file.s\"\n.include \"Routines/subfunc/vi/vi_fill_last_line.s\"\n.include \"Routines/subfunc/vi/vi_copy_arg1_to_name_file_open.s\"\n.include \"Routines/subfunc/vi/vi_length_file_plus_plus.s\"\n.include \"Routines/subfunc/vi/vi_compute_video_adress.s\"\n.include \"Routines/subfunc/vi/vi_ptr_file_used_sub_sub.s\"\n.include \"Routines/subfunc/vi/vi_display_file_opened.s\"\n.include \"Routines/subfunc/vi/vi_check_0A.s\"\n.include \"Routines/subfunc/vi/vi_set_xpos_0.s\"\n.include \"Routines/subfunc/vi/vi_ptr_file_used_plus_plus_and_check_eof.s\"\n.include \"Routines/subfunc/vi/vi_search_next_line.s\"\n.include \"Routines/subfunc/vi/vi_shift_file_from_memory_one_char.s\"\n.include \"Routines/subfunc/vi/vi_check_inserted_char_overflow_the_max_column.s\"\n.include \"Routines/subfunc/vi/vi_scroll_from_left_to_right_full_line.s\"\n.include \"Routines/subfunc/vi/vi_shift_line_left_to_right_editor.s\"\n.include \"Routines/subfunc/vi/vi_search_previous_line_beginning.s\"\n.include \"Routines/subfunc/vi/vi_set_xpos_from_A.s\"\n.include \"Routines/subfunc/vi/vi_scroll_to_left.s\"\n.include \"Routines/subfunc/vi/vi_check_if_previous_line_was_truncated.s\"\n.include \"Routines/subfunc/vi/vi_search_previous_cr.s\"\n.include \"Routines/subfunc/vi/vi_add_char_to_text.s\"\n.include \"Routines/subfunc/vi/vi_strlen_current_line.s\"\n.include \"Routines/subfunc/vi/vi_compute_last_text_line.s\"\n\n.include \"Routines/subfunc/vi/vi_clear_command_line.s\"\n\n.include \"Routines/subfunc/vi/vi_key_down.s\"\n.include \"Routines/subfunc/vi/vi_key_enter.s\"\n.include \"Routines/subfunc/vi/vi_key_del.s\"\n.include \"Routines/subfunc/vi/vi_key_up.s\"\n.include \"Routines/subfunc/vi/vi_key_right.s\"\n.include \"Routines/subfunc/vi/vi_key_left.s\"\n.include \"Routines/subfunc/vi/vi_check_eof.s\"\n\n.include \"Routines/vi_put_char.s\"\n.include \"Routines/vi_command_edition.s\"\n.include \"Routines/vi_editor_switch_on_cursor.s\"\n.include \"Routines/vi_editor_switch_off_cursor.s\"\n.include \"Routines/vi_edition_keyboard.s\"\n.include \"Routines/vi_switch_to_edition_mode.s\"\n\n.include \"Routines/tables.s\"\n.include \"Routines/_clrscr_vi.s\"\n\n\n.proc _vi\n\n\n XMAINARGS = $2C\n XGETARGV = $2E\n argv := userzp ; 2 bytes\n argc := userzp+2 ; 1 byte\n\n lda #$00\n sta vi_no_opened_file\n\n\n lda #<$bb80\n sta vi_ptr_screen\n\n lda #>$bb80\n sta vi_ptr_screen+1\n\n\n lda #$00\n sta vi_fileopened\n\n\n malloc .sizeof(vi_struct_data)\n\n cmp #NULL\n bne @not_oom2\n cpy #NULL\n bne @not_oom2\n print str_OOM\n ; oom\n rts\n@not_oom2:\n\n sta vi_struct\n sty vi_struct+1\n\n jsr vi_init_vi_struct\n\n initmainargs vi_argv, vi_argc, 0\n\n lda vi_argc ; Do we have a file on the command line\n cmp #$01\n beq not_opened_file\n\n getmainarg #1, (vi_argv) ,vi_arg1\n\n ; Checking if vi_arg1 is not empty\n\n ; when we type \"vi [space]\" on command line, initmainargs returns argv=2 but the arg is empty\n ; This is a fix to avoid this\n ldy #$00\n@check_arg:\n lda (vi_arg1),y\n beq not_opened_file\n\n@check_filename:\n\n fopen (vi_arg1), O_RDONLY,,vi_fp\n\n cpx #$FF\n bne opened_file\n cmp #$FF\n bne opened_file\n beq not_opened_file\n\n\n\nopened_file:\n lda #$01\n sta vi_fileopened\n\n ; copy into name_file_open of the struct\n jsr vi_copy_arg1_to_name_file_open\n\nnot_opened_file:\n\n cursor OFF\n jsr _clrscr_vi\n\n malloc #VI_MAX_LENGTH_FILE,vi_ptr_file ; $376B\n\n lda vi_ptr_file ; $3769\n bne @ptr_file_continue\n lda vi_ptr_file+1\n bne @ptr_file_continue\n print str_OOM\n rts\n\n@ptr_file_continue:\n ; Set last char of the file ptr\n\n lda vi_ptr_file\n ldx vi_ptr_file+1\n jsr vi_set_ptr_last_char\n\n lda vi_ptr_file ; Contains the ptr $376B\n sta vi_ptr_file_used\n\n lda vi_ptr_file+1\n sta vi_ptr_file_used+1\n\n lda vi_fileopened\n beq @skip_loading\n\n ; Now load the file\n fread (vi_ptr_file_used), #VI_MAX_LENGTH_FILE, 1, vi_fp ; myptr is from a malloc for example\n\n sta vi_length_file\n sta vi_length_file_compute\n stx vi_length_file+1\n stx vi_length_file_compute+1\n fclose(vi_fp)\n\n\n lda vi_ptr_file\n sta vi_ptr1\n sta vi_ptr_file_used\n\n\n lda vi_ptr_file+1\n sta vi_ptr1+1\n sta vi_ptr_file_used+1\n\n ; now set vi_ptr_last_char with the length of the file\n\n lda vi_length_file\n ldx vi_length_file+1\n jsr vi_ptr_last_char_add\n\n ; Check eof\n ; Dans le cas où on a chargé avant un fichier, comme on ne s'arrete pas à l'eof du fichier courant, cela va continuer a s'afficher alors même qu'on est à la fin du fichier\n ; Cela va afficher l'ancien fichier chargé. On compare donc pour s'arreter.\n\n jsr vi_display_file_opened\n\n@skip_fill_last_line:\n\n ldx #$01\n ldy #vi_struct_data::ypos_screen\n lda (vi_struct),y\n beq @no_compute ; First line ? Then X=1 in order to display ~ on the second line\n cmp #VI_LAST_LINE_EDITOR\n beq @no_empty_line_begin\n\n tax\n inx\n\n@no_compute:\n ldy #$01\n jmp @display_empty_line_begin\n\n@skip_loading:\n ldy #$01 ; For a new_file\n ldx #$01\n@display_empty_line_begin:\n jsr vi_fill_screen_with_empty_line\n\n@no_empty_line_begin:\n ; Set cursor position to 0,0\n ldy #vi_struct_data::ypos_screen\n lda #$00\n sta (vi_struct),y\n\n ldy #vi_struct_data::xpos_screen\n sta (vi_struct),y\n\n lda vi_ptr_file ; Contains the ptr\n sta vi_ptr_file_used\n\n lda vi_ptr_file+1\n sta vi_ptr_file_used+1\n\n\n@loop_until_esc_is_pressed:\n\n jsr vi_edition_keyboard\n\n cmp #$01\n\n beq @final_exit\n jmp @loop_until_esc_is_pressed\n\n@final_exit:\n rts\n\n; **********************************\n\n\n\n\nstr_OOM:\n .asciiz \"OOM\" ; FIXME import from general lib\nstr_not_found:\n .asciiz \"File not found\"\n\n\n.endproc\n```\n\n\n-------------------------------\n\nUpdated on 2022-12-15 at 14:20:17 +0100\n"
},
{
"alpha_fraction": 0.56685471534729,
"alphanum_fraction": 0.5836389064788818,
"avg_line_length": 28.297521591186523,
"blob_id": "0c31527228c3eb0c545923fed9b77e21006bfb73",
"content_id": "22fb323eb4256a6bfcd754653001a0274d5fad66",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "HTML",
"length_bytes": 7090,
"license_type": "no_license",
"max_line_length": 192,
"num_lines": 242,
"path": "/kernel/index.html",
"repo_name": "orix-software/orix-software.github.io",
"src_encoding": "UTF-8",
"text": "<!DOCTYPE html>\n<!--[if IE 8]><html class=\"no-js lt-ie9\" lang=\"en\" > <![endif]-->\n<!--[if gt IE 8]><!--> <html class=\"no-js\" lang=\"en\" > <!--<![endif]-->\n<head>\n <meta charset=\"utf-8\">\n <meta http-equiv=\"X-UA-Compatible\" content=\"IE=edge\">\n <meta name=\"viewport\" content=\"width=device-width, initial-scale=1.0\">\n <meta name=\"description\" content=\"None\">\n \n \n <link rel=\"shortcut icon\" href=\"img/favicon.ico\">\n <title>Kernel</title>\n <link rel=\"stylesheet\" href=\"https://fonts.googleapis.com/css?family=Lato:400,700|Roboto+Slab:400,700|Inconsolata:400,700\" />\n\n <link rel=\"stylesheet\" href=\"css/theme.css\" />\n <link rel=\"stylesheet\" href=\"css/theme_extra.css\" />\n <link rel=\"stylesheet\" href=\"https://cdnjs.cloudflare.com/ajax/libs/highlight.js/9.12.0/styles/github.min.css\" />\n \n <script>\n // Current page data\n var mkdocs_page_name = \"General information\";\n var mkdocs_page_input_path = \"index.md\";\n var mkdocs_page_url = null;\n </script>\n \n <script src=\"js/jquery-2.1.1.min.js\" defer></script>\n <script src=\"js/modernizr-2.8.3.min.js\" defer></script>\n <script src=\"https://cdnjs.cloudflare.com/ajax/libs/highlight.js/9.12.0/highlight.min.js\"></script>\n <script>hljs.initHighlightingOnLoad();</script> \n \n</head>\n\n<body class=\"wy-body-for-nav\" role=\"document\">\n\n <div class=\"wy-grid-for-nav\">\n\n \n <nav data-toggle=\"wy-nav-shift\" class=\"wy-nav-side stickynav\">\n <div class=\"wy-side-scroll\">\n <div class=\"wy-side-nav-search\">\n <a href=\".\" class=\"icon icon-home\"> Kernel</a>\n <div role=\"search\">\n <form id =\"rtd-search-form\" class=\"wy-form\" action=\"./search.html\" method=\"get\">\n <input type=\"text\" name=\"q\" placeholder=\"Search docs\" title=\"Type search term here\" />\n </form>\n</div>\n </div>\n\n <div class=\"wy-menu wy-menu-vertical\" data-spy=\"affix\" role=\"navigation\" aria-label=\"main navigation\">\n <ul class=\"current\">\n <li class=\"toctree-l1 current\"><a class=\"reference internal current\" href=\".\">General information</a>\n <ul class=\"current\">\n </ul>\n </li>\n </ul>\n <ul>\n <li class=\"toctree-l1\"><a class=\"reference internal\" href=\"return/\">Return to main documentation</a>\n </li>\n </ul>\n <p class=\"caption\"><span class=\"caption-text\">Dynamiclink</span></p>\n <ul>\n <li class=\"toctree-l1\"><a class=\"reference internal\" href=\"dynamiclink/XCALLFUNCTLIB/\">XCALLFUNCTLIB</a>\n </li>\n <li class=\"toctree-l1\"><a class=\"reference internal\" href=\"dynamiclink/XOPENLIB/\">XOPENLIB</a>\n </li>\n <li class=\"toctree-l1\"><a class=\"reference internal\" href=\"dynamiclink/dynlibformat/\">Dynamic lib format</a>\n </li>\n </ul>\n </div>\n </div>\n </nav>\n\n <section data-toggle=\"wy-nav-shift\" class=\"wy-nav-content-wrap\">\n\n \n <nav class=\"wy-nav-top\" role=\"navigation\" aria-label=\"top navigation\">\n <i data-toggle=\"wy-nav-top\" class=\"fa fa-bars\"></i>\n <a href=\".\">Kernel</a>\n </nav>\n\n \n <div class=\"wy-nav-content\">\n <div class=\"rst-content\">\n <div role=\"navigation\" aria-label=\"breadcrumbs navigation\">\n <ul class=\"wy-breadcrumbs\">\n <li><a href=\".\">Docs</a> »</li>\n \n \n \n <li>General information</li>\n <li class=\"wy-breadcrumbs-aside\">\n \n </li>\n </ul>\n \n <hr/>\n</div>\n <div role=\"main\">\n <div class=\"section\">\n \n <h1 id=\"general-information\">General information</h1>\n<h1 id=\"install-environment-development-for-developers\">Install environment development for developers</h1>\n<ul>\n<li><a href=\"getting-started\">Getting-started</a></li>\n</ul>\n<h2 id=\"kernel-primitives\">Kernel primitives</h2>\n<ul>\n<li><a href=\"primitives\">Primitives</a></li>\n<li>\n<p><a href=\"mount\">Tools</a></p>\n</li>\n<li>\n<p>Doxygen for <a href=\"doxygen/kernel\">kernel</a>.</p>\n</li>\n</ul>\n<h2 id=\"samples-for-kernel-calls\">Samples for kernel calls</h2>\n<h3 id=\"get-ctrlc-hook\">Get ctrl+c hook</h3>\n<pre><code class=\"ca65\"> .include "telestrat.inc"\n asl KBDCTC\n bcc @no_ctrl\n ; here is ctrl management\n@no_ctrl:\n rts\n\n</code></pre>\n\n<h3 id=\"test-kernel-error-when-fopen-failed\">Test kernel Error when fopen failed</h3>\n<pre><code class=\"ca65\"> ; Open\n ... \n BRK_KERNEL XOPEN \n cpy #$00\n bne @read_rom \n cmp #$00\n bne @read_rom \n\n ldx #$04 ; Get kernel ERRNO\n BRK_KERNEL XVARS\n sta userzp\n sty userzp+1\n\n ldy #$00\n lda (userzp),y ; get ERRNO from kernel\n cmp #ENOMEM\n bne @no_enomem_kernel_error\n PRINT str_enomem\n\n@no_enomem_kernel_error:\n cmp #ENOENT\n bne @no_enoent_kernel_error\n PRINT str_not_found\n</code></pre>\n\n<h3 id=\"stop-output-when-spacebar-is-pressed-for-example\">Stop output when spacebar is pressed (for example)</h3>\n<pre><code class=\"ca65\"> .include "telestrat.inc"\n@L12:\n BRK_KERNEL XRD0\n bcs @no_char_action\n cmp #' ' ; Space pressed ?\n bne @no_char ; no continue\n\n lda myOffsetToManageSwitch ; One byte\n beq @inv_to_1\n\n lda #$00\n sta myOffsetToManageSwitch\n jmp @L12\n\n@inv_to_1:\n inc myOffsetToManageSwitch\n jmp @L12\n\n@no_char_action:\n lda myOffsetToManageSwitch\n beq @L12\n\n@no_char:\n ; No keypressed\n</code></pre>\n\n<h3 id=\"displays-a-string\">Displays a string</h3>\n<pre><code class=\"ca65\"> lda #<mystr\n ldy #>mystr\n BRK_KERNEL XWRSTR0\n rts\n mysstr:\n .asciiz "hello" \n</code></pre>\n \n </div>\n </div>\n <footer>\n \n <div class=\"rst-footer-buttons\" role=\"navigation\" aria-label=\"footer navigation\">\n \n <a href=\"return/\" class=\"btn btn-neutral float-right\" title=\"Return to main documentation\">Next <span class=\"icon icon-circle-arrow-right\"></span></a>\n \n \n </div>\n \n\n <hr/>\n\n <div role=\"contentinfo\">\n <!-- Copyright etc -->\n \n </div>\n\n Built with <a href=\"https://www.mkdocs.org/\">MkDocs</a> using a <a href=\"https://github.com/snide/sphinx_rtd_theme\">theme</a> provided by <a href=\"https://readthedocs.org\">Read the Docs</a>.\n</footer>\n \n </div>\n </div>\n\n </section>\n\n </div>\n\n <div class=\"rst-versions\" role=\"note\" aria-label=\"versions\">\n <span class=\"rst-current-version\" data-toggle=\"rst-current-version\">\n \n \n \n <span style=\"margin-left: 15px\"><a href=\"return/\" style=\"color: #fcfcfc\">Next »</a></span>\n \n </span>\n</div>\n <script>var base_url = '.';</script>\n <script src=\"js/theme.js\" defer></script>\n <script src=\"search/main.js\" defer></script>\n <script defer>\n window.onload = function () {\n SphinxRtdTheme.Navigation.enable(true);\n };\n </script>\n\n</body>\n</html>\n\n<!--\nMkDocs version : 1.1.2\nBuild Date UTC : 2021-11-14 08:10:22.117729+00:00\n-->\n"
},
{
"alpha_fraction": 0.7121986746788025,
"alphanum_fraction": 0.7348429560661316,
"avg_line_length": 33.150001525878906,
"blob_id": "409d5c15c7f5505a1466bc39c0ad860301d92a65",
"content_id": "0119f6fac82837cbd84936dc433d7fd8f3f98862",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Markdown",
"length_bytes": 1396,
"license_type": "no_license",
"max_line_length": 153,
"num_lines": 40,
"path": "/hardware/docs/stratos/index.md",
"repo_name": "orix-software/orix-software.github.io",
"src_encoding": "UTF-8",
"text": "# Stratos\n\n## Specs Générales\n\n* 15 Mo de SRAM\n* 1 mo d'eeprom\n* Voir s'il faut faire une SRAM séparée pour la vidéo à terme (mais passer en QFP 144 car pas assez de pinout pour séparer les bus)\n* ULA 2 améliorée\n* 65c816 à 14Mhz\n* port USB, port sdcard (stockage principal)\n* Os : Orix \n* langage : forth, basic atmos etc ...\n* Peut être placé dans le boitier atmos. Il reste néanmoins la question des ports joysticks qu'il faut arriver à faire sortir du boitier\n\n## Fonctionnement atmos compatible\n* La rom atmos est prête pour lire des .tap directement\n* sedoric version sdcard est en cours d'écriture\n* ftdos en cours d'écriture\n* Detection du mode en fonction de la broche E du cpu ? Si émulation = mode atmos ?\n* Lancement de basic11 : chargement en RAM de la rom associée dans les 16 Mo par malloc, puis virtualisation des adresses dans le cpld avec un registre ?\n\n## Cas non gérés\n* WIFI : pour l'instant port ethernet avec stack tcp/ip hardware (voir port ethernet), voir s'il faut un ESP32\n* Réel FDC \n\n## Controleur usb, sdcard, clavier, souris\n\n* Ch376 : fat 32, port USB, HID, drivers écrits (émulation aussi)\n\n## Port ethernet \n\n* ch395 : stack tcp/ip, socket gérés : 8, icmp ...\n\n## EEprom \n* kernel et tout ce qui se met en rom tel que shell, forth etc.\n* Programmation : orixcfg\n\n#Kernel Orix 16bits\n\nappels via jsr avec un paramètre de primitive ?\n\n\n\n"
},
{
"alpha_fraction": 0.7244898080825806,
"alphanum_fraction": 0.7244898080825806,
"avg_line_length": 10.529411315917969,
"blob_id": "809778801f1906dad54bd1524c56a9cc871a23d5",
"content_id": "ace44c2ac430544c07a717c7653015105beecba5",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Markdown",
"length_bytes": 196,
"license_type": "no_license",
"max_line_length": 73,
"num_lines": 17,
"path": "/docs/commands/uname.md",
"repo_name": "orix-software/orix-software.github.io",
"src_encoding": "UTF-8",
"text": "# uname\n\n## Introduction\n\nDisplay kernel info\n\n## SYNOPSYS\n\n+ uname [-a]\n\n## DESCRIPTION\n\nDisplay kernel info\n\n## SOURCE\n\nhttps://github.com/orix-software/shell/blob/master/src/commands/uname.asm\n"
},
{
"alpha_fraction": 0.7450980544090271,
"alphanum_fraction": 0.7450980544090271,
"avg_line_length": 14,
"blob_id": "12b7e05fefa6fba24fde5721c3adcc8ba9cca7c4",
"content_id": "9be8a95130b78b2544c6dbeab4ee5833e1b96d05",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Markdown",
"length_bytes": 255,
"license_type": "no_license",
"max_line_length": 85,
"num_lines": 17,
"path": "/docs/commands/env.md",
"repo_name": "orix-software/orix-software.github.io",
"src_encoding": "UTF-8",
"text": "# env\n\n## Introduction\n\nDisplays env variables\n\n## SYNOPSYS\n\n+ #env\n\n## DESCRIPTION\n\nDisplays env variables. It can't be used in command line or shell script for instance\n\n## SOURCE\n\nhttps://github.com/orix-software/shell/blob/master/src/commands/env.asm\n"
},
{
"alpha_fraction": 0.8064516186714172,
"alphanum_fraction": 0.8064516186714172,
"avg_line_length": 30,
"blob_id": "9d0af7c8ad9d372a4c42e5f913a4ecf5309ac537",
"content_id": "a0f207a64dfcc91a5749003b59248e1c8ffd3937",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Markdown",
"length_bytes": 31,
"license_type": "no_license",
"max_line_length": 30,
"num_lines": 1,
"path": "/doxygen/vi/index.md",
"repo_name": "orix-software/orix-software.github.io",
"src_encoding": "UTF-8",
"text": "# Vi source code documentation\n"
},
{
"alpha_fraction": 0.729903519153595,
"alphanum_fraction": 0.7331189513206482,
"avg_line_length": 18.375,
"blob_id": "8d0dc29d96745d28fb3fb36e431d40ef9a587329",
"content_id": "f7877da94f9dc46e83f2f5c9bd34c77f9703a5de",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Markdown",
"length_bytes": 311,
"license_type": "no_license",
"max_line_length": 100,
"num_lines": 16,
"path": "/docs/commands/viewscr.md",
"repo_name": "orix-software/orix-software.github.io",
"src_encoding": "UTF-8",
"text": "# Command: viewscr\n\n### viewscr utility\n\n## SYNOPSYS\n+ viewscr file [...]\n\n## EXAMPLES\n+ viewscr format.scr\n+ viewscr address.win monac1.hlp\n\n## DESCRIPTION\nDisplay TEXT screen dump files like SCR files from FTDOS disks or HLP, WIN files from Sedoric disks.\n\n## SOURCE\nhttps://github.com/orix-software/viewscr\n\n"
},
{
"alpha_fraction": 0.7452786564826965,
"alphanum_fraction": 0.7581759691238403,
"avg_line_length": 48.44186019897461,
"blob_id": "2482bd8e126bf7965e94fb89bd1cd334d62ac102",
"content_id": "eb2addce0863642e3c34c798d5254c67e2485593",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Markdown",
"length_bytes": 2171,
"license_type": "no_license",
"max_line_length": 276,
"num_lines": 43,
"path": "/docs/hardware/nappe.md",
"repo_name": "orix-software/orix-software.github.io",
"src_encoding": "UTF-8",
"text": "# Ribbon cable management on twilighte board\r\n\r\nThe twilighte board is built without ribbon cable but it can be done with amplibus and a ribbon cable. It works only if there is 2 lines of 74hct541 (see bellow).\r\n\r\nThis trick is also useful for Oric which does not want to start with the board. The board has some incompatibility issue with some oric, but it's can be connected with 2 lines of 74HCT541. One from an amplibus for example, and another from the twilighte board expansion board.\r\n\r\n## First step build the ribbon cable\r\n\r\nGet a floppy disk ribbon cable. And put a female HE10 34 like this at approximately 13 cms (It could be more, but greater cable is not tested yet)from the previous connector :\r\n\r\nPut the notch (black connector) at the bottom and crimp the HE10 34 connector (Grey connector) like this :\r\n\r\n\r\n\r\nOn the picture the notch is not visible on the black connector, because it's on the other side. But the hole on the grey connector is visible. It's really important to keep theses positions.\r\n\r\nRibbon cable tested :\r\n\r\n* 10 cms (OK)\r\n* 20 cms (OK)\r\n* 30 cms (OK)\r\n\r\nNow, we can use any amplibus or another expansion board from the twilighte board\r\n\r\n## with a simple amplibus\r\n\r\nIf you have a amplibus as Silicebit one (see the picture), you can now connect the ribbon cable to these amplibus (Black connector on the previous picture) (notch must be at the bottom as the Silicebit amplibus permits).\r\n\r\n\r\n\r\nAnd plug the grey connector to twilighte board expansion board. The grey connector must be with the hole at the top when connected to expansion board.\r\n\r\n\r\n\r\n## You have an extra twilighte expansion board\r\n\r\nif you have this one, you can not connect without a connector modification. The angle connector to the twilighte board side must be to the top of the board. The previous ribbon cable can be connected with the notch at the bottom of the card like the next picture\r\n\r\n\r\n\r\nFull overview :\r\n\r\n\r\n\r\n"
},
{
"alpha_fraction": 0.7954545617103577,
"alphanum_fraction": 0.7954545617103577,
"avg_line_length": 13.666666984558105,
"blob_id": "f66a29a04efd6fc8eb133f5443e5fcfc12d9be0a",
"content_id": "7748097d1d59393e2abbb0e32d011baf527c9a68",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Markdown",
"length_bytes": 44,
"license_type": "no_license",
"max_line_length": 36,
"num_lines": 3,
"path": "/docs/commands/df.md",
"repo_name": "orix-software/orix-software.github.io",
"src_encoding": "UTF-8",
"text": "# df \n\nDisplays storage device informations\n"
},
{
"alpha_fraction": 0.6873508095741272,
"alphanum_fraction": 0.7207637429237366,
"avg_line_length": 19.842105865478516,
"blob_id": "c21b8a9141a6af47b0d7789509d5bed77f0062ef",
"content_id": "86cf4ba99128859296de2b83567aa8eea7d92732",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Markdown",
"length_bytes": 419,
"license_type": "no_license",
"max_line_length": 99,
"num_lines": 19,
"path": "/docs/developer_manual/firmware.md",
"repo_name": "orix-software/orix-software.github.io",
"src_encoding": "UTF-8",
"text": "# Firmwares\r\n\r\nYou can get firmare version with $342 register and get b0,b1,b2. It will show the firmware version.\r\n\r\nThere is only 2 versions availables :\r\n\r\n## Firmware 1\r\n\r\n## Firmware 2\r\n\r\nManage microdisc compatibility : it handes $314 register to switch on the right ram overlay bank\r\n\r\n## Firmware 3\r\n\r\nUnder development\r\n\r\n## Firmware 4\r\n\r\nIt's under development but it will handles others hardware chips.\r\n\r\n\r\n"
},
{
"alpha_fraction": 0.3664596378803253,
"alphanum_fraction": 0.47826087474823,
"avg_line_length": 8.470588684082031,
"blob_id": "4c525586ff214e139e7f5cdbd9153b1bfca61233",
"content_id": "15d2d20d42ac075b9a4f415bca488f2c06db798c",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Markdown",
"length_bytes": 161,
"license_type": "no_license",
"max_line_length": 49,
"num_lines": 17,
"path": "/doxygen/doxybook_output_vi/index_pages.md",
"repo_name": "orix-software/orix-software.github.io",
"src_encoding": "UTF-8",
"text": "---\ntitle: Pages\n\n---\n\n# Pages\n\n\n\n\n* **page [strlen](Pages/strlen.md#page-strlen)** \n\n\n\n-------------------------------\n\nUpdated on 2022-12-15 at 14:20:18 +0100\n"
},
{
"alpha_fraction": 0.5304540395736694,
"alphanum_fraction": 0.564784049987793,
"avg_line_length": 15.418181419372559,
"blob_id": "3bf0c87c621cb16cf8d98f6ad5b62e5fc30665e5",
"content_id": "b8f34a310597bfe629e8856dc84ba1133839ec29",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Markdown",
"length_bytes": 903,
"license_type": "no_license",
"max_line_length": 117,
"num_lines": 55,
"path": "/doxygen/doxybook_output_vi/Files/vi__check__beginning__of__file_8s.md",
"repo_name": "orix-software/orix-software.github.io",
"src_encoding": "UTF-8",
"text": "---\ntitle: vi_check_beginning_of_file.s\n\n---\n\n# vi_check_beginning_of_file.s\n\n\n\n## Routine\n\n| | Name |\n| -------------- | -------------- |\n| | **[vi_check_beginning_of_file](Files/vi__check__beginning__of__file_8s.md#Routine-vi-check-beginning-of-file)** |\n\n\n## Routine documentation\n\n### Routine vi_check_beginning_of_file\n\n```ca65\nvi_check_beginning_of_file\n```\n\n\n\n\n## Source code\n\n```ca65\n; A returns 00 if beginning of the file, 01 if not\n\n.proc vi_check_beginning_of_file\n lda vi_ptr_file_used\n cmp vi_ptr_file\n bne @not_beginning\n\n lda vi_ptr_file_used+1\n cmp vi_ptr_file+1\n bne @not_beginning\n\n lda #IS_BEGINNING_OF_THE_FILE ; Beginninng of the file\n rts\n\n@not_beginning:\n lda #$01 ; Not the Beginninng of the file\n rts\n\n.endproc\n```\n\n\n-------------------------------\n\nUpdated on 2022-12-15 at 14:20:17 +0100\n"
},
{
"alpha_fraction": 0.5151315927505493,
"alphanum_fraction": 0.5434210300445557,
"avg_line_length": 17.095237731933594,
"blob_id": "969e87358ff86cc3f7d1f985d9a6b259945de812",
"content_id": "75ebbfc13d04cc68388e362c352ef4842e1a3b1d",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Markdown",
"length_bytes": 1520,
"license_type": "no_license",
"max_line_length": 109,
"num_lines": 84,
"path": "/doxygen/doxybook_output/Files/vi__fill__last__line_8s.md",
"repo_name": "orix-software/orix-software.github.io",
"src_encoding": "UTF-8",
"text": "---\ntitle: /mnt/c/Users/plifp/OneDrive/oric/projets/orix-software/vi/src/functions/subfunc/vi/vi_fill_last_line.s\n\n---\n\n# /mnt/c/Users/plifp/OneDrive/oric/projets/orix-software/vi/src/functions/subfunc/vi/vi_fill_last_line.s\n\n\n\n## Functions\n\n| | Name |\n| -------------- | -------------- |\n| | **[vi_fill_last_line](Files/vi__fill__last__line_8s.md#function-vi-fill-last-line)**() |\n\n\n## Functions Documentation\n\n### function vi_fill_last_line\n\n```cpp\nvi_fill_last_line()\n```\n\n\n\n\n## Source code\n\n```cpp\n.proc vi_fill_last_line\n\n ; A and Y contains the ptr on the file to display on the last line\n sta vi_ptr1\n sty vi_ptr1+1\n\n ldx #$00\n@compute_empty_line_loop:\n ldy #vi_struct_data::ptr_last_char_file\n lda (vi_struct),y\n sta vi_tmp1\n\n lda vi_ptr1\n cmp vi_tmp1\n bne @not_eof\n\n\n ldy #vi_struct_data::ptr_last_char_file+1\n lda (vi_struct),y\n sta vi_tmp1\n\n lda vi_ptr1+1\n cmp vi_tmp1\n bne @not_eof\n ; reached the end of the file\n rts\n\n@not_eof:\n inc vi_ptr1\n bne @S40\n inc vi_ptr1+1\n\n@S40:\n ldy #$00\n lda (vi_ptr1),y\n cmp #CR\n beq @exit\n cmp #LF\n beq @compute_empty_line_loop\n sta VI_EDITION_LAST_VIDEO_ADRESS,x\n inx\n cpx #VI_EDITOR_MAX_COLUMN+1\n beq @exit\n jmp @compute_empty_line_loop\n\n@exit:\n rts\n.endproc\n```\n\n\n-------------------------------\n\nUpdated on 2022-12-15 at 11:48:27 +0100\n"
},
{
"alpha_fraction": 0.5882353186607361,
"alphanum_fraction": 0.5951557159423828,
"avg_line_length": 11.136363983154297,
"blob_id": "062aa7727116dcc1a2a267937b5aa2781720eb00",
"content_id": "bb59c376f4668277ee003fd8105b56bd2abfdc94",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Markdown",
"length_bytes": 289,
"license_type": "no_license",
"max_line_length": 70,
"num_lines": 22,
"path": "/docs/developer_manual/orixsdk_macros/explode.md",
"repo_name": "orix-software/orix-software.github.io",
"src_encoding": "UTF-8",
"text": "# Explode\r\n\r\n## Description\r\n\r\nProduce an explode sound\r\n\r\n## Usage\r\n\r\nexplode\r\n\r\n## Example\r\n\r\n```ca65\r\n .include \"telestrat.inc\"\r\n .include \"../orix-sdk/macros/SDK_sound.mac\"\r\n\r\n explode\r\n rts\r\n\r\n```\r\n\r\nCall [XEXPLODE](../../../kernel/primitives/xexplode/) kernel function.\r\n"
},
{
"alpha_fraction": 0.5450831055641174,
"alphanum_fraction": 0.6375374794006348,
"avg_line_length": 237.37661743164062,
"blob_id": "35f9a0e5a9c1b00457f6653037ae5d26bfb35db8",
"content_id": "988917c298ff9a2d81dcb2523690f36b6c52b5df",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "HTML",
"length_bytes": 18355,
"license_type": "no_license",
"max_line_length": 15559,
"num_lines": 77,
"path": "/doxygen/kernel/orix_8inc_source.html",
"repo_name": "orix-software/orix-software.github.io",
"src_encoding": "UTF-8",
"text": "<!DOCTYPE html PUBLIC \"-//W3C//DTD XHTML 1.0 Transitional//EN\" \"http://www.w3.org/TR/xhtml1/DTD/xhtml1-transitional.dtd\">\n<html xmlns=\"http://www.w3.org/1999/xhtml\">\n<head>\n<meta http-equiv=\"Content-Type\" content=\"text/xhtml;charset=UTF-8\"/>\n<meta http-equiv=\"X-UA-Compatible\" content=\"IE=9\"/>\n<meta name=\"generator\" content=\"Doxygen 1.8.13\"/>\n<meta name=\"viewport\" content=\"width=device-width, initial-scale=1\"/>\n<title>Orix : Kernel: /home/jede/oric/kernel/src/orix.inc Source File</title>\n<link href=\"tabs.css\" rel=\"stylesheet\" type=\"text/css\"/>\n<script type=\"text/javascript\" src=\"jquery.js\"></script>\n<script type=\"text/javascript\" src=\"dynsections.js\"></script>\n<link href=\"search/search.css\" rel=\"stylesheet\" type=\"text/css\"/>\n<script type=\"text/javascript\" src=\"search/searchdata.js\"></script>\n<script type=\"text/javascript\" src=\"search/search.js\"></script>\n<link href=\"doxygen.css\" rel=\"stylesheet\" type=\"text/css\" />\n</head>\n<body>\n<div id=\"top\"><!-- do not remove this div, it is closed by doxygen! -->\n<div id=\"titlearea\">\n<table cellspacing=\"0\" cellpadding=\"0\">\n <tbody>\n <tr style=\"height: 56px;\">\n <td id=\"projectalign\" style=\"padding-left: 0.5em;\">\n <div id=\"projectname\">Orix : Kernel\n </div>\n </td>\n </tr>\n </tbody>\n</table>\n</div>\n<!-- end header part -->\n<!-- Generated by Doxygen 1.8.13 -->\n<script type=\"text/javascript\">\nvar searchBox = new SearchBox(\"searchBox\", \"search\",false,'Search');\n</script>\n<script type=\"text/javascript\" src=\"menudata.js\"></script>\n<script type=\"text/javascript\" src=\"menu.js\"></script>\n<script type=\"text/javascript\">\n$(function() {\n initMenu('',true,false,'search.php','Search');\n $(document).ready(function() { init_search(); });\n});\n</script>\n<div id=\"main-nav\"></div>\n<!-- window showing the filter options -->\n<div id=\"MSearchSelectWindow\"\n onmouseover=\"return searchBox.OnSearchSelectShow()\"\n onmouseout=\"return searchBox.OnSearchSelectHide()\"\n onkeydown=\"return searchBox.OnSearchSelectKey(event)\">\n</div>\n\n<!-- iframe showing the search results (closed by default) -->\n<div id=\"MSearchResultsWindow\">\n<iframe src=\"javascript:void(0)\" frameborder=\"0\" \n name=\"MSearchResults\" id=\"MSearchResults\">\n</iframe>\n</div>\n\n<div id=\"nav-path\" class=\"navpath\">\n <ul>\n<li class=\"navelem\"><a class=\"el\" href=\"dir_68267d1309a1af8e8297ef4c3efbcdba.html\">src</a></li> </ul>\n</div>\n</div><!-- top -->\n<div class=\"header\">\n <div class=\"headertitle\">\n<div class=\"title\">orix.inc</div> </div>\n</div><!--header-->\n<div class=\"contents\">\n<div class=\"fragment\"><div class=\"line\"><a name=\"l00001\"></a><span class=\"lineno\"> 1</span> BASH_MAX_ARGS = 3 ; Number of possible args in the command line</div><div class=\"line\"><a name=\"l00002\"></a><span class=\"lineno\"> 2</span> </div><div class=\"line\"><a name=\"l00003\"></a><span class=\"lineno\"> 3</span> BASH_MAX_BUFEDT_LENGTH = 110</div><div class=\"line\"><a name=\"l00004\"></a><span class=\"lineno\"> 4</span> userzp := VARLNG</div><div class=\"line\"><a name=\"l00005\"></a><span class=\"lineno\"> 5</span> </div><div class=\"line\"><a name=\"l00006\"></a><span class=\"lineno\"> 6</span> RETURN_BANK_READ_BYTE_FROM_OVERLAY_RAM := $78</div><div class=\"line\"><a name=\"l00007\"></a><span class=\"lineno\"> 7</span> </div><div class=\"line\"><a name=\"l00008\"></a><span class=\"lineno\"> 8</span> NEXT_STACK_BANK := $0418</div><div class=\"line\"><a name=\"l00009\"></a><span class=\"lineno\"> 9</span> </div><div class=\"line\"><a name=\"l00010\"></a><span class=\"lineno\"> 10</span> MOUSE_JOYSTICK_MANAGEMENT := $291 ; 12 bytes ?</div><div class=\"line\"><a name=\"l00011\"></a><span class=\"lineno\"> 11</span> i_o_counter := $1A ; 1 byte</div><div class=\"line\"><a name=\"l00012\"></a><span class=\"lineno\"> 12</span> i_o_save := $1B ; 3 bytes ?</div><div class=\"line\"><a name=\"l00013\"></a><span class=\"lineno\"> 13</span> TRANSITION_RS232:= $1E; 3 bytes</div><div class=\"line\"><a name=\"l00014\"></a><span class=\"lineno\"> 14</span> </div><div class=\"line\"><a name=\"l00015\"></a><span class=\"lineno\"> 15</span> KEYBOARD_COUNTER:=$02A6 ; 4 bytes</div><div class=\"line\"><a name=\"l00016\"></a><span class=\"lineno\"> 16</span> VIA_UNKNOWN :=$028F ; seems tobe a backup of timer 2 bytes</div><div class=\"line\"><a name=\"l00017\"></a><span class=\"lineno\"> 17</span> </div><div class=\"line\"><a name=\"l00018\"></a><span class=\"lineno\"> 18</span> TELEMON_ID_BANK = $07</div><div class=\"line\"><a name=\"l00019\"></a><span class=\"lineno\"> 19</span> ATMOS_ID_BANK = $06</div><div class=\"line\"><a name=\"l00020\"></a><span class=\"lineno\"> 20</span> ORIX_ID_BANK = $05</div><div class=\"line\"><a name=\"l00021\"></a><span class=\"lineno\"> 21</span> MONITOR_ID_BANK = $04</div><div class=\"line\"><a name=\"l00022\"></a><span class=\"lineno\"> 22</span> </div><div class=\"line\"><a name=\"l00023\"></a><span class=\"lineno\"> 23</span> ORIX_MEMORY_DRIVER_ADDRESS:=$400</div><div class=\"line\"><a name=\"l00024\"></a><span class=\"lineno\"> 24</span> SWITCH_TO_BANK_ID := $040C</div><div class=\"line\"><a name=\"l00025\"></a><span class=\"lineno\"> 25</span> ;NEXT_STACK_BANK:=$418</div><div class=\"line\"><a name=\"l00026\"></a><span class=\"lineno\"> 26</span> FIXME_PAGE0_0:=$25</div><div class=\"line\"><a name=\"l00027\"></a><span class=\"lineno\"> 27</span> ORIX_VECTOR_READ_VALUE_INTO_RAM_OVERLAY:=$411 ; .dsb 3</div><div class=\"line\"><a name=\"l00028\"></a><span class=\"lineno\"> 28</span> </div><div class=\"line\"><a name=\"l00029\"></a><span class=\"lineno\"> 29</span> work_channel :=$19 ; 1 byte</div><div class=\"line\"><a name=\"l00030\"></a><span class=\"lineno\"> 30</span> KBD_UNKNOWN :=$271 ;FIXME</div><div class=\"line\"><a name=\"l00031\"></a><span class=\"lineno\"> 31</span> </div><div class=\"line\"><a name=\"l00032\"></a><span class=\"lineno\"> 32</span> ; ORIX_PATH_CURRENT:=$0525 ;FIXME</div><div class=\"line\"><a name=\"l00033\"></a><span class=\"lineno\"> 33</span> FUFTRV=$0100; working Buffer </div><div class=\"line\"><a name=\"l00034\"></a><span class=\"lineno\"> 34</span> </div><div class=\"line\"><a name=\"l00035\"></a><span class=\"lineno\"> 35</span> </div><div class=\"line\"><a name=\"l00036\"></a><span class=\"lineno\"> 36</span> TELEMON_KEYBOARD_BUFFER_BEGIN = $C5C4</div><div class=\"line\"><a name=\"l00037\"></a><span class=\"lineno\"> 37</span> TELEMON_KEYBOARD_BUFFER_END = $C680 </div><div class=\"line\"><a name=\"l00038\"></a><span class=\"lineno\"> 38</span> TELEMON_ACIA_BUFFER_INPUT_BEGIN = $C680</div><div class=\"line\"><a name=\"l00039\"></a><span class=\"lineno\"> 39</span> TELEMON_ACIA_BUFFER_INPUT_END = $C800</div><div class=\"line\"><a name=\"l00040\"></a><span class=\"lineno\"> 40</span> </div><div class=\"line\"><a name=\"l00041\"></a><span class=\"lineno\"> 41</span> TELEMON_ACIA_BUFFER_OUTPUT_BEGIN = $C800</div><div class=\"line\"><a name=\"l00042\"></a><span class=\"lineno\"> 42</span> TELEMON_ACIA_BUFFER_OUTPUT_END = $CA00</div><div class=\"line\"><a name=\"l00043\"></a><span class=\"lineno\"> 43</span>  </div><div class=\"line\"><a name=\"l00044\"></a><span class=\"lineno\"> 44</span> TELEMON_PRINTER_BUFFER_BEGIN = $CA00</div><div class=\"line\"><a name=\"l00045\"></a><span class=\"lineno\"> 45</span> TELEMON_PRINTER_BUFFER_END = $D200</div><div class=\"line\"><a name=\"l00046\"></a><span class=\"lineno\"> 46</span> </div><div class=\"line\"><a name=\"l00047\"></a><span class=\"lineno\"> 47</span> HISTORY_BUFFER_BEGIN = TELEMON_PRINTER_BUFFER_END+1</div><div class=\"line\"><a name=\"l00048\"></a><span class=\"lineno\"> 48</span> HISTORY_BUFFER_END = TELEMON_PRINTER_BUFFER_END+200</div><div class=\"line\"><a name=\"l00049\"></a><span class=\"lineno\"> 49</span> </div><div class=\"line\"><a name=\"l00050\"></a><span class=\"lineno\"> 50</span> NULL = 0</div><div class=\"line\"><a name=\"l00051\"></a><span class=\"lineno\"> 51</span> ;#define FILE_OPEN_TABLE $D000 </div><div class=\"line\"><a name=\"l00052\"></a><span class=\"lineno\"> 52</span> </div><div class=\"line\"><a name=\"l00053\"></a><span class=\"lineno\"> 53</span> ; #define BUF1 $C100 ; Stratsed buffer</div><div class=\"line\"><a name=\"l00054\"></a><span class=\"lineno\"> 54</span> </div><div class=\"line\"><a name=\"l00055\"></a><span class=\"lineno\"> 55</span> ;#define BUFBUF $C080 ; buffers definition</div><div class=\"line\"><a name=\"l00056\"></a><span class=\"lineno\"> 56</span> BUFROU:= $C500 ; Routines <span class=\"keywordflow\">for</span> buffers gestion</div><div class=\"line\"><a name=\"l00057\"></a><span class=\"lineno\"> 57</span> </div><div class=\"line\"><a name=\"l00058\"></a><span class=\"lineno\"> 58</span> </div><div class=\"line\"><a name=\"l00059\"></a><span class=\"lineno\"> 59</span> SIZE_OF_STACK_BANK = 1</div><div class=\"line\"><a name=\"l00060\"></a><span class=\"lineno\"> 60</span> </div><div class=\"line\"><a name=\"l00061\"></a><span class=\"lineno\"> 61</span> BNKOLD:=$40F </div><div class=\"line\"><a name=\"l00062\"></a><span class=\"lineno\"> 62</span> </div><div class=\"line\"><a name=\"l00063\"></a><span class=\"lineno\"> 63</span> ADDRESS_READ_BETWEEN_BANK:=$15</div><div class=\"line\"><a name=\"l00064\"></a><span class=\"lineno\"> 64</span> ADDRESS_VECTOR_FOR_ADIOB:=$17</div><div class=\"line\"><a name=\"l00065\"></a><span class=\"lineno\"> 65</span> BNK_TO_SWITCH:=$410</div><div class=\"line\"><a name=\"l00066\"></a><span class=\"lineno\"> 66</span> </div><div class=\"line\"><a name=\"l00067\"></a><span class=\"lineno\"> 67</span> tmp1:=$34</div><div class=\"line\"><a name=\"l00068\"></a><span class=\"lineno\"> 68</span> ptr1:=$32</div><div class=\"line\"><a name=\"l00069\"></a><span class=\"lineno\"> 69</span> </div><div class=\"line\"><a name=\"l00070\"></a><span class=\"lineno\"> 70</span> .bss</div><div class=\"line\"><a name=\"l00071\"></a><span class=\"lineno\"> 71</span> .org $200</div><div class=\"line\"><a name=\"l00072\"></a><span class=\"lineno\"> 72</span> KERNEL_ERRNO:</div><div class=\"line\"><a name=\"l00073\"></a><span class=\"lineno\"> 73</span>  .res 1</div><div class=\"line\"><a name=\"l00074\"></a><span class=\"lineno\"> 74</span> </div><div class=\"line\"><a name=\"l00075\"></a><span class=\"lineno\"> 75</span> .bss</div><div class=\"line\"><a name=\"l00076\"></a><span class=\"lineno\"> 76</span> .org $4C7</div><div class=\"line\"><a name=\"l00077\"></a><span class=\"lineno\"> 77</span> FIXME_DUNNO</div><div class=\"line\"><a name=\"l00078\"></a><span class=\"lineno\"> 78</span> .res 1</div><div class=\"line\"><a name=\"l00079\"></a><span class=\"lineno\"> 79</span> STACK_BANK</div><div class=\"line\"><a name=\"l00080\"></a><span class=\"lineno\"> 80</span> .res SIZE_OF_STACK_BANK</div><div class=\"line\"><a name=\"l00081\"></a><span class=\"lineno\"> 81</span> READ_BYTE_FROM_OVERLAY_RAM</div><div class=\"line\"><a name=\"l00082\"></a><span class=\"lineno\"> 82</span> ; <span class=\"keyword\">this</span> contains a routine length : 20 bytew</div><div class=\"line\"><a name=\"l00083\"></a><span class=\"lineno\"> 83</span> .res 20</div><div class=\"line\"><a name=\"l00084\"></a><span class=\"lineno\"> 84</span> </div><div class=\"line\"><a name=\"l00085\"></a><span class=\"lineno\"> 85</span> .bss</div><div class=\"line\"><a name=\"l00086\"></a><span class=\"lineno\"> 86</span> .org $500</div><div class=\"line\"><a name=\"l00087\"></a><span class=\"lineno\"> 87</span> </div><div class=\"line\"><a name=\"l00088\"></a><span class=\"lineno\"> 88</span> kernel_ch376_current_mode_fs:</div><div class=\"line\"><a name=\"l00089\"></a><span class=\"lineno\"> 89</span>  .res 1 ; FIXME</div><div class=\"line\"><a name=\"l00090\"></a><span class=\"lineno\"> 90</span> </div><div class=\"line\"><a name=\"l00091\"></a><span class=\"lineno\"> 91</span> kernel_end_of_variables_before_BUFNOM:</div><div class=\"line\"><a name=\"l00092\"></a><span class=\"lineno\"> 92</span> KERNEL_TMP_XEXEC:</div><div class=\"line\"><a name=\"l00093\"></a><span class=\"lineno\"> 93</span>  .res 1</div><div class=\"line\"><a name=\"l00094\"></a><span class=\"lineno\"> 94</span> .if kernel_end_of_variables_before_BUFNOM > BUFNOM</div><div class=\"line\"><a name=\"l00095\"></a><span class=\"lineno\"> 95</span>  .error <span class=\"stringliteral\">"Error BUFNOM is written by kernel variables try to move some variables in orix.inc after BUFNOM or BUFEDT"</span></div><div class=\"line\"><a name=\"l00096\"></a><span class=\"lineno\"> 96</span> .endif </div><div class=\"line\"><a name=\"l00097\"></a><span class=\"lineno\"> 97</span> </div><div class=\"line\"><a name=\"l00098\"></a><span class=\"lineno\"> 98</span> .bss</div><div class=\"line\"><a name=\"l00099\"></a><span class=\"lineno\"> 99</span> .org BUFNOM</div><div class=\"line\"><a name=\"l00100\"></a><span class=\"lineno\"> 100</span> .res 14</div><div class=\"line\"><a name=\"l00101\"></a><span class=\"lineno\"> 101</span> kernel_malloc:</div><div class=\"line\"><a name=\"l00102\"></a><span class=\"lineno\"> 102</span>  .tag kernel_malloc_struct</div><div class=\"line\"><a name=\"l00103\"></a><span class=\"lineno\"> 103</span> </div><div class=\"line\"><a name=\"l00104\"></a><span class=\"lineno\"> 104</span> .ifdef WITH_DEBUG</div><div class=\"line\"><a name=\"l00105\"></a><span class=\"lineno\"> 105</span> kernel_debug:</div><div class=\"line\"><a name=\"l00106\"></a><span class=\"lineno\"> 106</span>  .tag kernel_debug_struct</div><div class=\"line\"><a name=\"l00107\"></a><span class=\"lineno\"> 107</span> .endif</div><div class=\"line\"><a name=\"l00108\"></a><span class=\"lineno\"> 108</span> </div><div class=\"line\"><a name=\"l00109\"></a><span class=\"lineno\"> 109</span> kernel_process:</div><div class=\"line\"><a name=\"l00110\"></a><span class=\"lineno\"> 110</span>  .tag kernel_process_struct</div><div class=\"line\"><a name=\"l00111\"></a><span class=\"lineno\"> 111</span>  </div><div class=\"line\"><a name=\"l00112\"></a><span class=\"lineno\"> 112</span> kernel_end_of_variables_before_BUFEDT: </div><div class=\"line\"><a name=\"l00113\"></a><span class=\"lineno\"> 113</span> .if kernel_end_of_variables_before_BUFEDT > BUFEDT</div><div class=\"line\"><a name=\"l00114\"></a><span class=\"lineno\"> 114</span>  .error <span class=\"stringliteral\">"Error BUFEDT is written by kernel variables try to move some variables in orix.inc after $590"</span></div><div class=\"line\"><a name=\"l00115\"></a><span class=\"lineno\"> 115</span> .endif </div><div class=\"line\"><a name=\"l00116\"></a><span class=\"lineno\"> 116</span> </div><div class=\"line\"><a name=\"l00117\"></a><span class=\"lineno\"> 117</span> .bss</div><div class=\"line\"><a name=\"l00118\"></a><span class=\"lineno\"> 118</span> .org BUFEDT</div><div class=\"line\"><a name=\"l00119\"></a><span class=\"lineno\"> 119</span> .res 110</div><div class=\"line\"><a name=\"l00120\"></a><span class=\"lineno\"> 120</span> ORIX_ARGV:</div><div class=\"line\"><a name=\"l00121\"></a><span class=\"lineno\"> 121</span>  .res MAX_LENGTH_OF_FILES*BASH_MAX_ARGS</div><div class=\"line\"><a name=\"l00122\"></a><span class=\"lineno\"> 122</span> ; used to check term</div><div class=\"line\"><a name=\"l00123\"></a><span class=\"lineno\"> 123</span> TEMP_ORIX_1:</div><div class=\"line\"><a name=\"l00124\"></a><span class=\"lineno\"> 124</span>  .res 1</div><div class=\"line\"><a name=\"l00125\"></a><span class=\"lineno\"> 125</span> ORIX_ARGC:</div><div class=\"line\"><a name=\"l00126\"></a><span class=\"lineno\"> 126</span>  .res 1</div><div class=\"line\"><a name=\"l00127\"></a><span class=\"lineno\"> 127</span> KERNEL_DRIVER_MEMORY:</div><div class=\"line\"><a name=\"l00128\"></a><span class=\"lineno\"> 128</span>  .res 170</div><div class=\"line\"><a name=\"l00129\"></a><span class=\"lineno\"> 129</span> </div><div class=\"line\"><a name=\"l00130\"></a><span class=\"lineno\"> 130</span> .out .sprintf(<span class=\"stringliteral\">"ORIX_ARGV : %x"</span>, ORIX_ARGV)</div><div class=\"line\"><a name=\"l00131\"></a><span class=\"lineno\"> 131</span> .out .sprintf(<span class=\"stringliteral\">"ORIX_ARGC : %x"</span>, ORIX_ARGC)</div><div class=\"line\"><a name=\"l00132\"></a><span class=\"lineno\"> 132</span> </div><div class=\"line\"><a name=\"l00133\"></a><span class=\"lineno\"> 133</span> </div><div class=\"line\"><a name=\"l00134\"></a><span class=\"lineno\"> 134</span> kernel_end_of_memory_for_kernel:</div><div class=\"line\"><a name=\"l00135\"></a><span class=\"lineno\"> 135</span> </div><div class=\"line\"><a name=\"l00136\"></a><span class=\"lineno\"> 136</span> .out .sprintf(<span class=\"stringliteral\">"kernel_end_of_memory_for_kernel : %x"</span>, kernel_end_of_memory_for_kernel)</div><div class=\"line\"><a name=\"l00137\"></a><span class=\"lineno\"> 137</span> </div><div class=\"line\"><a name=\"l00138\"></a><span class=\"lineno\"> 138</span> kernel_end_of_variables_after_BUFEDT: </div><div class=\"line\"><a name=\"l00139\"></a><span class=\"lineno\"> 139</span> .if kernel_end_of_variables_after_BUFEDT > $7FF</div><div class=\"line\"><a name=\"l00140\"></a><span class=\"lineno\"> 140</span>  .error <span class=\"stringliteral\">"Error start of execution program for binary ($800) is written by kernel variables try to move some variables in orix.inc before $800"</span></div><div class=\"line\"><a name=\"l00141\"></a><span class=\"lineno\"> 141</span> .endif </div><div class=\"line\"><a name=\"l00142\"></a><span class=\"lineno\"> 142</span> </div><div class=\"line\"><a name=\"l00143\"></a><span class=\"lineno\"> 143</span> </div></div><!-- fragment --></div><!-- contents -->\n<!-- start footer part -->\n<hr class=\"footer\"/><address class=\"footer\"><small>\nGenerated on Sat Apr 25 2020 11:13:33 for Orix : Kernel by  <a href=\"http://www.doxygen.org/index.html\">\n<img class=\"footer\" src=\"doxygen.png\" alt=\"doxygen\"/>\n</a> 1.8.13\n</small></address>\n</body>\n</html>\n"
},
{
"alpha_fraction": 0.757888674736023,
"alphanum_fraction": 0.7687894701957703,
"avg_line_length": 40.5,
"blob_id": "1a1fb8d5a3ecad1a7d536677c31aadeb20fbd5d6",
"content_id": "b9649c071e74035cde6bf468952b6a7c519d8906",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Markdown",
"length_bytes": 1743,
"license_type": "no_license",
"max_line_length": 211,
"num_lines": 42,
"path": "/docs/developer_manual/index.md",
"repo_name": "orix-software/orix-software.github.io",
"src_encoding": "UTF-8",
"text": "# Installation for developpers\n\nThere is two way to develop : Oricutron and cross dev with test on real computer\n\n## Step 1 : Download the oricutron archive with all things set :\n\n[oricutron_twilighte_board_for_projects.zip](oricutron_twilighte_board_for_projects.zip)\n\nor\n\nConfigure and start oricutron with twilighte board\n\nBuild from source code oricutron : [https://github.com/pete-gordon/oricutron](https://github.com/pete-gordon/oricutron) or download last windows binaries : https://iss.sandacite.com/iss/oricutron/\n\nIn oricutron.cfg, you have just to set the right parameter to start oric with the board emulated\n\n``` ca65\nch376 = yes\ntwilighte_board = yes\n```\n\nplugins/twilighte_board/twilighte.cfg you can put any rom you want.\n\n## Step 2 : get main rom (kernel and shell)\n\nSkip this step, if you downloaded \"oricutron_twilighte_board_for_projects\" arhive\n\n## Step 3 : start oricutron\n\nStarts oricutron, if everything is OK, Orix starts, if you have missing rom at boot, modify plugins/twilighte_board/twilighte.cfg in order to replace wrong ROM.\n\nthere is 2 folders on oricutron: sdcard and usbdrive, in orix type \"/#mount\", if usb is shown, then \"usbdrive/\" folder is the main folder.\n\nUnder Linux, all filenames must be uppercases (folder and files), it's mainly because emulation rescpect FAT32 specs in the sdcard/usbdrive chip\n\n## Step 4 : Download/build cc65\n\nBuild it from cc65. You can build your first program in C with cc65 and telestrat target. It builds an Orix binary. This binary can be put in bin/ folder of the device (for oricutron : usbdrive or sdcard folder)\n\n## Step 5 : Download SDK (mainly for assembly)\n\nClone [https://github.com/assinie/orix-sdk](https://github.com/assinie/orix-sdk) in your project folder\n"
},
{
"alpha_fraction": 0.5352480411529541,
"alphanum_fraction": 0.5639686584472656,
"avg_line_length": 15.409090995788574,
"blob_id": "9fcb3250ebc5d00bd6cf492d689386b67839a1f4",
"content_id": "67a0e54f6ca0af053eb2c24e88b4b492e3249624",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Markdown",
"length_bytes": 383,
"license_type": "no_license",
"max_line_length": 56,
"num_lines": 22,
"path": "/docs/samples/c_samples/writefile.md",
"repo_name": "orix-software/orix-software.github.io",
"src_encoding": "UTF-8",
"text": "# Write a file\r\n\r\n```c\r\n#include <stdio.h>\r\n\r\nstatic unsigned char buffer[4000];\r\n\r\nint main() {\r\n FILE* fp;\r\n\r\n unsigned int nb_write;\r\n static unsigned char destfilename[9]=\"myfile.txt\"; \r\n\r\n fp = fopen(destfilename, \"wb\");\r\n if (!fp) {\r\n printf(\"Can't create output file\\n\");\r\n return 1;\r\n }\r\n nb_write=fwrite(buffer, 1, 4000, fp);\r\n fclose(fp);\r\n} \r\n```\r\n"
},
{
"alpha_fraction": 0.5637727975845337,
"alphanum_fraction": 0.6098606586456299,
"avg_line_length": 16.294116973876953,
"blob_id": "021f76f9eac86756baf52596fe6db9e68b5e5acb",
"content_id": "e38df682f653edfbeb1fb439954f7d3c9581dca9",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Markdown",
"length_bytes": 933,
"license_type": "no_license",
"max_line_length": 77,
"num_lines": 51,
"path": "/docs/kernel/primitives/xfseek.md",
"repo_name": "orix-software/orix-software.github.io",
"src_encoding": "UTF-8",
"text": "# XFSEEK\r\n\r\nAvailable in kernel 2022.2\r\n\r\n## Description\r\n\r\nSeek into file\r\n\r\n## Input\r\n\r\n* X whence\r\n* AY : position 0 to 15 bits (word)\r\n* RESB : position 16 to 32 bits(word)\r\n* RES : fd (but only I byte is managed)\r\n\r\n## Output\r\n\r\n* Return A=EINVAL if whence is not recognize\r\n* Return A=EOK if seek has been performed\r\n* Return A=$FF if something is wrong when seek has performed\r\n\r\n## Example\r\n\r\n``` ca65\r\n; Move to $20 bytes in the file from the current position\r\n\r\n; [IN] X whence\r\n; [IN] AY position 0 to 15\r\n; [IN] RESB position 0 to 31\r\n; [IN] RES fd\r\n\r\n .include \"telestrat.inc\"\r\n\r\n ldy #$10\r\n lda #$00\r\n tax\r\n sta RESB\r\n sta RESB+1\r\n\r\n lda fp\r\n ldx #SEEK_CUR\r\n BRK_TELEMON XFSEEK\r\n rts\r\n\r\n```\r\n\r\n!!! warning \"XFSEEK is available since kernel v2022.2\"\r\n\r\n!!! bug \"XFSEEK have bugs for SEEK_CUR in 2022.4.1 (fixed in kernel 2023.1)\"\r\n\r\n!!! bug \"XFSEEK does not manage SEEK_END\"\r\n"
},
{
"alpha_fraction": 0.6199377179145813,
"alphanum_fraction": 0.6323987245559692,
"avg_line_length": 13.285714149475098,
"blob_id": "f1cf8aa0920b64b7beec66ced8efe64d50659d04",
"content_id": "2da3437b74f90d95d8a7ab2b0c402ce453fd9ab0",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Markdown",
"length_bytes": 321,
"license_type": "no_license",
"max_line_length": 37,
"num_lines": 21,
"path": "/kernel/docs/dynamiclink/XOPENLIB.md",
"repo_name": "orix-software/orix-software.github.io",
"src_encoding": "UTF-8",
"text": "# XOPENLIB\r\n\r\n``` ca65\r\nlda #<lib_sdl\r\nldy #>lib_sdl\r\nBRK_KERNEL OPENLIB\r\n; Return in X the id of the lib\r\nrts\r\nlib_sdl:\r\n .asciiz \"sdl.so\"\r\n\r\n```\r\n\r\n``` ca65\r\n; RES contains the id of the lib\r\n; A & Y contains maybe the struct\r\n; X contains others thing\r\n; RES contains the id of the function\r\nBRK_KERNEL XCALL\r\n\r\n```\r\n"
},
{
"alpha_fraction": 0.6032661199569702,
"alphanum_fraction": 0.6810758709907532,
"avg_line_length": 32.58064651489258,
"blob_id": "cfee394eddfc4bf983a1b96a459aaadb59877bb5",
"content_id": "36af28cc55cffb73957b9361c1cd961e010f3625",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Markdown",
"length_bytes": 1041,
"license_type": "no_license",
"max_line_length": 205,
"num_lines": 31,
"path": "/doxygen/doxybook_output/Files/dir_6c260d28152e78a3ffcc2e06b7438967.md",
"repo_name": "orix-software/orix-software.github.io",
"src_encoding": "UTF-8",
"text": "---\ntitle: /mnt/c/Users/plifp/OneDrive/oric/projets/orix-software/vi/src\n\n---\n\n# /mnt/c/Users/plifp/OneDrive/oric/projets/orix-software/vi/src\n\n\n\n## Directories\n\n| Name |\n| -------------- |\n| **[/mnt/c/Users/plifp/OneDrive/oric/projets/orix-software/vi/src/commands](Files/dir_e1568de7a9ec0caf269f7729a27efb24.md#dir-/mnt/c/users/plifp/onedrive/oric/projets/orix-software/vi/src/commands)** |\n| **[/mnt/c/Users/plifp/OneDrive/oric/projets/orix-software/vi/src/data](Files/dir_eb94e028ad508402029845f2921e79f7.md#dir-/mnt/c/users/plifp/onedrive/oric/projets/orix-software/vi/src/data)** |\n| **[/mnt/c/Users/plifp/OneDrive/oric/projets/orix-software/vi/src/functions](Files/dir_2288eccfea1af74b995388678c757cc0.md#dir-/mnt/c/users/plifp/onedrive/oric/projets/orix-software/vi/src/functions)** |\n\n## Files\n\n| Name |\n| -------------- |\n| **[/mnt/c/Users/plifp/OneDrive/oric/projets/orix-software/vi/src/rom.s](Files/rom_8s.md#file-rom.s)** |\n\n\n\n\n\n\n-------------------------------\n\nUpdated on 2022-12-15 at 11:48:27 +0100\n"
},
{
"alpha_fraction": 0.5981523990631104,
"alphanum_fraction": 0.6050808429718018,
"avg_line_length": 11.967741966247559,
"blob_id": "5d7d154e96fb6e972e47cb8b8d58ccc1aa21a6d3",
"content_id": "b4014f0fedf300378af0995189a9fc14eafb76b8",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Markdown",
"length_bytes": 433,
"license_type": "no_license",
"max_line_length": 101,
"num_lines": 31,
"path": "/docs/kernel/primitives/xgetcwd.md",
"repo_name": "orix-software/orix-software.github.io",
"src_encoding": "UTF-8",
"text": "# XGETCWD (getcwd)\r\n\r\n## Description\r\n\r\nReturn current directory of the process\r\n\r\n## Input\r\n\r\n## Output\r\n\r\nA and Y returns a ptr of the string\r\n\r\n## Example\r\n\r\n``` ca65\r\n .include \"telestrat.inc\"\r\n\r\n BRK_TELEMON XGETCWD\r\n\r\n ; Displays\r\n BRK_TELEMON XWSTR0\r\n rts\r\n```\r\n\r\n## Memory usage\r\n\r\n## zeropage\r\n\r\n* RESB\r\n\r\n!!! tip \"See [getcwd](../../../developer_manual/orixsdk_macros/getcwd) macro from orix-sdk to use it\"\r\n"
},
{
"alpha_fraction": 0.5985267162322998,
"alphanum_fraction": 0.6666666865348816,
"avg_line_length": 22.60869598388672,
"blob_id": "998fec9c7229ea9d0180aa2d070c6e9903656928",
"content_id": "13a3ba5175483865d49d4348498ee3696ebf108e",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Markdown",
"length_bytes": 543,
"license_type": "no_license",
"max_line_length": 227,
"num_lines": 23,
"path": "/doxygen/doxybook_output/Files/dir_8a0a2fbb0e248d2b08adec17bb698d4e.md",
"repo_name": "orix-software/orix-software.github.io",
"src_encoding": "UTF-8",
"text": "---\ntitle: /mnt/c/Users/plifp/OneDrive/oric/projets/orix-software/vi/src/functions/subfunc\n\n---\n\n# /mnt/c/Users/plifp/OneDrive/oric/projets/orix-software/vi/src/functions/subfunc\n\n\n\n## Directories\n\n| Name |\n| -------------- |\n| **[/mnt/c/Users/plifp/OneDrive/oric/projets/orix-software/vi/src/functions/subfunc/vi](Files/dir_a5544c2bf0b70f8d417c4d3bfea04409.md#dir-/mnt/c/users/plifp/onedrive/oric/projets/orix-software/vi/src/functions/subfunc/vi)** |\n\n\n\n\n\n\n-------------------------------\n\nUpdated on 2022-12-15 at 11:48:27 +0100\n"
},
{
"alpha_fraction": 0.7052631378173828,
"alphanum_fraction": 0.7052631378173828,
"avg_line_length": 14.777777671813965,
"blob_id": "be27c5cb40e523907b31a7a1d6c2b81e4c6ad7a3",
"content_id": "06051823848726c35eb0d58e2a904dfb1cc1177c",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Markdown",
"length_bytes": 285,
"license_type": "no_license",
"max_line_length": 85,
"num_lines": 18,
"path": "/docs/commands/submit.md",
"repo_name": "orix-software/orix-software.github.io",
"src_encoding": "UTF-8",
"text": "# Command: submit\n\n### submit utility\n\n## SYNOPSYS\n+ submit file [arg...]\n\n## EXAMPLES\n+ submit help.sub\n\n## DESCRIPTION\n**submit** is a command language interpreter that executes commands read from a file.\n\n## OPTIONS\n* no options\n\n## SOURCE\nhttps://github.com/orix-software/submit\n\n"
},
{
"alpha_fraction": 0.7916666865348816,
"alphanum_fraction": 0.7916666865348816,
"avg_line_length": 7.333333492279053,
"blob_id": "8e8cf227cb957b83d49a2a794fa1879ada04548b",
"content_id": "951cef121bdd3c23f6d9bfa506e8dcaa4ab95f8d",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Markdown",
"length_bytes": 24,
"license_type": "no_license",
"max_line_length": 12,
"num_lines": 3,
"path": "/docs/commands/twiload.md",
"repo_name": "orix-software/orix-software.github.io",
"src_encoding": "UTF-8",
"text": "# Twilload\n\nStart loader"
},
{
"alpha_fraction": 0.47959184646606445,
"alphanum_fraction": 0.5408163070678711,
"avg_line_length": 23.5,
"blob_id": "62436157c21ea4ee4fb5766ec929881468ce1aad",
"content_id": "6bc16b7793b5a1040199b0f1c0ec3369611aeb86",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "JavaScript",
"length_bytes": 98,
"license_type": "no_license",
"max_line_length": 76,
"num_lines": 4,
"path": "/doxygen/doc/html/search/all_0.js",
"repo_name": "orix-software/orix-software.github.io",
"src_encoding": "UTF-8",
"text": "var searchData=\n[\n ['_5fclrscr_5fvi_2es_0',['_clrscr_vi.s',['../__clrscr__vi_8s.html',1,'']]]\n];\n"
},
{
"alpha_fraction": 0.6792452931404114,
"alphanum_fraction": 0.7028301954269409,
"avg_line_length": 16.909090042114258,
"blob_id": "cc9d50209ffd318de269ff46cec6669ab605e6ad",
"content_id": "d46d4adeda2b2990de20b10ec0272d00d15e6ee4",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Markdown",
"length_bytes": 212,
"license_type": "no_license",
"max_line_length": 53,
"num_lines": 11,
"path": "/docs/hardware/joysticks.md",
"repo_name": "orix-software/orix-software.github.io",
"src_encoding": "UTF-8",
"text": "# Joysticks\r\n\r\n## Read joysticks states\r\n\r\nOn the board, joysticks works likes Telestrat one's.\r\n\r\nRight port has 3 buttons\r\n\r\nLeft port has 1 button\r\n\r\nThere is 2 bits to read joysticks states. Bit 7 and 6\r\n\r\n\r\n"
},
{
"alpha_fraction": 0.7124999761581421,
"alphanum_fraction": 0.7458333373069763,
"avg_line_length": 18.45945930480957,
"blob_id": "4bb67c57ef458d929e86bd6912ddc96134fdd13f",
"content_id": "34735fa86b6d373eb267a103488f9563e95584a3",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Markdown",
"length_bytes": 720,
"license_type": "no_license",
"max_line_length": 138,
"num_lines": 37,
"path": "/docs/commands/basic10.md",
"repo_name": "orix-software/orix-software.github.io",
"src_encoding": "UTF-8",
"text": "# basic10\n\n## Introduction\n\nStart Oric-1 rom\n\n## SYNOPSYS\n\n+ basic10\n+ basic10 -g\n+ basic10 -l\n+ basic10 \"MYTAPE\n\n## DESCRIPTION\n\nThis command starts the Oric-1 rom. This rom did not test RAM and cload/csave are done on sdcard. It means that it calls file from sdcard.\n\nCload works with .tap file. Multitap files works too.\n\nGet a tape file, and place it in the root folder of the sdcard.\n\nWhen there is no parameter, basic10 has /home/basic10 default folder\n\nStarts basic10 :\n/#basic10\nor\n/#basic10 \"DEFENDER\n\nCLOAD\"ZORGONS => it will load zorgons.tap\n\n## Working software\n\n+ all games except the Hobbit (another ROM is available)\n\n## SOURCE\n\nhttps://github.com/orix-software/shell/blob/master/src/commands/basic10.asm\n"
},
{
"alpha_fraction": 0.6998012065887451,
"alphanum_fraction": 0.6998012065887451,
"avg_line_length": 17.629629135131836,
"blob_id": "7e9fe4b8f3fbbbf9118e1ebe72db06cf3b2b91a0",
"content_id": "b91ede2d1b04f921e5d2991c990ec8ee7e9aa51f",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Markdown",
"length_bytes": 509,
"license_type": "no_license",
"max_line_length": 124,
"num_lines": 27,
"path": "/docs/commands/ls.md",
"repo_name": "orix-software/orix-software.github.io",
"src_encoding": "UTF-8",
"text": "# ls\n\n## Display catalog\n\nList all the file in the current folder. Token are supported (*,?) ex : « ls ?e.tap » will list all files with a ‘e ’ in the\nsecond letter\n\n## SYNOPSYS\n\nlist all the files in the current folder\n\n+ ls\n\nList all *.tap files\n\n+ ls *.tap\n\nList size and datetime of the file\n+ ls -l\n\n## DESCRIPTION\n\nDirectories are in ^FBLUE^G color. It manages '-l' and Pattern works in different ways : ls *.tap\n\n## SOURCE\n\nhttps://github.com/orix-software/shell/blob/master/src/commands/ls.asm\n"
},
{
"alpha_fraction": 0.5779122710227966,
"alphanum_fraction": 0.6111951470375061,
"avg_line_length": 16.36111068725586,
"blob_id": "79525d12acec174790ea8cb23a6e24603c917089",
"content_id": "116a89e5386f21077fa3ad71c570d4c17e1b5f84",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Markdown",
"length_bytes": 661,
"license_type": "no_license",
"max_line_length": 99,
"num_lines": 36,
"path": "/docs/kernel/primitives/xfread.md",
"repo_name": "orix-software/orix-software.github.io",
"src_encoding": "UTF-8",
"text": "# XCLOSE\r\n\r\n## Description\r\n\r\nRead byte from files\r\n\r\n## Input\r\n\r\n* AY contains the length to read\r\n* PTR_READ_DEST must be set because it's the ptr_dest\r\n* X contains the fd id\r\n\r\n## Output\r\n\r\nA & X contains the length read\r\n\r\n## Example\r\n\r\n```ca65\r\n .include \"telestrat.inc\"\r\n\r\n lda #<$A000\r\n sta PTR_READ_DEST\r\n lda #>$A000\r\n sta PTR_READ_DEST+1\r\n\r\n lda #<12 ; Read 12 bytes\r\n ldy #>12 ; REad 12 bytes\r\n ldx fp\r\n BRK_KERNEL XFREAD\r\n rts\r\n```\r\n\r\n!!! tip \"See [fread](../../../developer_manual/orixsdk_macros/fread) macro from orix-sdk to use it\"\r\n\r\n!!! fail \"XFREAD does not manage multiples opened files before kernel 2022.4 \"\r\n"
},
{
"alpha_fraction": 0.4821917712688446,
"alphanum_fraction": 0.5287671089172363,
"avg_line_length": 13.038461685180664,
"blob_id": "5c84bab27a543a370b669711c605331f63c0a3ff",
"content_id": "d90c20e4f4b373deef7e88513ae7ff0ac025b442",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Markdown",
"length_bytes": 730,
"license_type": "no_license",
"max_line_length": 92,
"num_lines": 52,
"path": "/doxygen/doxybook_output_vi/Files/vi__set__length__file_8s.md",
"repo_name": "orix-software/orix-software.github.io",
"src_encoding": "UTF-8",
"text": "---\ntitle: /Routines/vi_set_length_file.s\n\n---\n\n# /Routines/vi_set_length_file.s\n\n\n\n## Routine\n\n| | Name |\n| -------------- | -------------- |\n| | **[vi_set_length_file](Files/vi__set__length__file_8s.md#Routine-vi-set-length-file)** |\n\n\n## Routine documentation\n\n### Routine vi_set_length_file\n\n```ca65\nvi_set_length_file\n```\n\n\n\n\n## Source code\n\n```ca65\n\n; A,X,Y RES contains the length\n\n.proc vi_set_length_file\n ; set file length A and X contains the value\n\n pha\n jsr populate_tmp0_16_with_ptr_length_file\n pla\n ldy #$00\n sta (tmp0_16),y\n txa\n iny\n sta (tmp0_16),y\n rts\n.endproc\n```\n\n\n-------------------------------\n\nUpdated on 2022-12-15 at 14:20:17 +0100\n"
},
{
"alpha_fraction": 0.5518072247505188,
"alphanum_fraction": 0.574698805809021,
"avg_line_length": 17.04347801208496,
"blob_id": "01458531e4db18275eb34f2109671ece8c75ee7a",
"content_id": "a054f64505dfd9c301f039f601fd64174e660857",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Markdown",
"length_bytes": 830,
"license_type": "no_license",
"max_line_length": 112,
"num_lines": 46,
"path": "/doxygen/doxybook_output/Files/vi__set__ptr__last__char_8s.md",
"repo_name": "orix-software/orix-software.github.io",
"src_encoding": "UTF-8",
"text": "---\ntitle: /mnt/c/Users/plifp/OneDrive/oric/projets/orix-software/vi/src/functions/subfunc/vi/vi_set_ptr_last_char.s\n\n---\n\n# /mnt/c/Users/plifp/OneDrive/oric/projets/orix-software/vi/src/functions/subfunc/vi/vi_set_ptr_last_char.s\n\n\n\n## Functions\n\n| | Name |\n| -------------- | -------------- |\n| | **[vi_set_ptr_last_char](Files/vi__set__ptr__last__char_8s.md#function-vi-set-ptr-last-char)**() |\n\n\n## Functions Documentation\n\n### function vi_set_ptr_last_char\n\n```cpp\nvi_set_ptr_last_char()\n```\n\n\n\n\n## Source code\n\n```cpp\n.proc vi_set_ptr_last_char\n ; A and X contains the last char ptr\n\n ldy #vi_struct_data::ptr_last_char_file\n sta (vi_struct),y\n txa\n iny\n sta (vi_struct),y\n rts\n.endproc\n```\n\n\n-------------------------------\n\nUpdated on 2022-12-15 at 11:48:27 +0100\n"
},
{
"alpha_fraction": 0.7246531248092651,
"alphanum_fraction": 0.7246531248092651,
"avg_line_length": 25.02777862548828,
"blob_id": "74a97d13e416341377c07fa73e6bb2ca95d085da",
"content_id": "3e2a68a5fafc4101bfd56025ba19da991a31c7bd",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Markdown",
"length_bytes": 950,
"license_type": "no_license",
"max_line_length": 221,
"num_lines": 36,
"path": "/docs/commands/forth.md",
"repo_name": "orix-software/orix-software.github.io",
"src_encoding": "UTF-8",
"text": "# Command: forth\n\nLaunch forth for Orix\n\n## SYNOPSYS\n+ forth\n\n## DESCRIPTION\n\n**Forth** is a language based on teleforth (telestrat). You can type **VLIST** to list all words. If you need to switch to hires, you can type **GRAFX** and **HIRES**. If you need to switch to text: **GRAFX** and **TEXT**\n\n\nYou can use forth language. It works the same ways than Tele forth (it's\nteleforth but it write file on sdcard/usbdrive).\n\nYou can download Teleforth langage in order to see how to program in\nforth.\n\nWhen you type « forth » forth starts with the current folder directory.\n\nIf you were in /, forth will load and save file in this folder.\n\nIn others cases, you can create a forth folder in home and goes into it\nin forth for example :\n\n* mkdir home\n* #cd home\n* #mkdir forth\n* forth\n\nif you type « cd forth» in forth environnement, all files actions will\nbe perform in « /*home/*forth »\n\n## SOURCE\n\nhttps://github.com/assinie/Tele-Forth\n"
},
{
"alpha_fraction": 0.4878934621810913,
"alphanum_fraction": 0.5254237055778503,
"avg_line_length": 14.296296119689941,
"blob_id": "2dd96b1687765c4e325d8acf548cbf1d2a7331c3",
"content_id": "da439dad34f2f4e06a51df27964d4e6639087d2f",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Markdown",
"length_bytes": 826,
"license_type": "no_license",
"max_line_length": 111,
"num_lines": 54,
"path": "/doxygen/doxybook_output_vi/Files/vi__xpos__screen__plus__plus_8s.md",
"repo_name": "orix-software/orix-software.github.io",
"src_encoding": "UTF-8",
"text": "---\ntitle: vi_xpos_screen_plus_plus.s\n\n---\n\n# vi_xpos_screen_plus_plus.s\n\n\n\n## Routine\n\n| | Name |\n| -------------- | -------------- |\n| | **[vi_xpos_screen_plus_plus](Files/vi__xpos__screen__plus__plus_8s.md#Routine-vi-xpos-screen-plus-plus)** |\n\n\n## Routine documentation\n\n### Routine vi_xpos_screen_plus_plus\n\n```ca65\nvi_xpos_screen_plus_plus\n```\n\n\n\n\n## Source code\n\n```ca65\n; Returns in A=1 if we reached the eol\n\n.proc vi_xpos_screen_plus_plus\n ; ypos_screen=ypos_screen+1\n ldy #vi_struct_data::xpos_screen\n lda (vi_struct),y\n cmp #VI_EDITOR_MAX_COLUMN\n beq @no_add\n clc\n adc #$01\n sta (vi_struct),y\n lda #$00\n rts\n\n@no_add:\n lda #$01\n rts\n.endproc\n```\n\n\n-------------------------------\n\nUpdated on 2022-12-15 at 14:20:17 +0100\n"
},
{
"alpha_fraction": 0.4923076927661896,
"alphanum_fraction": 0.5384615659713745,
"avg_line_length": 11.461538314819336,
"blob_id": "f01607d3babdff159ec2fffd36ecc1b67a515235",
"content_id": "8c844a4e3345782fbf80f39fbb1c37f3a5a53acb",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "JavaScript",
"length_bytes": 325,
"license_type": "no_license",
"max_line_length": 30,
"num_lines": 26,
"path": "/doxygen/doc/html/search/searchdata.js",
"repo_name": "orix-software/orix-software.github.io",
"src_encoding": "UTF-8",
"text": "var indexSectionsWithContent =\n{\n 0: \"_lprstvxy\",\n 1: \"v\",\n 2: \"_rstv\",\n 3: \"v\",\n 4: \"lpvxy\"\n};\n\nvar indexSectionNames =\n{\n 0: \"all\",\n 1: \"classes\",\n 2: \"files\",\n 3: \"functions\",\n 4: \"variables\"\n};\n\nvar indexSectionLabels =\n{\n 0: \"All\",\n 1: \"Data Structures\",\n 2: \"Files\",\n 3: \"Functions\",\n 4: \"Variables\"\n};\n\n"
},
{
"alpha_fraction": 0.6089385747909546,
"alphanum_fraction": 0.6374922394752502,
"avg_line_length": 39.275001525878906,
"blob_id": "20ae6db5395a2b4c8c7723c0ff559bdc7db8d643",
"content_id": "6fdea7573fb4a7311919a31482837301ee5a0b0f",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Markdown",
"length_bytes": 1611,
"license_type": "no_license",
"max_line_length": 158,
"num_lines": 40,
"path": "/doxygen/doxybook_output_vi/Files/dir_2288eccfea1af74b995388678c757cc0.md",
"repo_name": "orix-software/orix-software.github.io",
"src_encoding": "UTF-8",
"text": "---\ntitle: /Routines\n\n---\n\n# /Routines\n\n\n\n## Directories\n\n| Name |\n| -------------- |\n| **[/Routines/subfunc](Files/dir_8a0a2fbb0e248d2b08adec17bb698d4e.md#dir-/mnt/c/users/plifp/onedrive/oric/projets/orix-software/vi/src/Routines/subfunc)** |\n\n## Files\n\n| Name |\n| -------------- |\n| **[/Routines/_clrscr_vi.s](Files/__clrscr__vi_8s.md#file--clrscr-vi.s)** |\n| **[/Routines/tables.s](Files/tables_8s.md#file-tables.s)** |\n| **[/Routines/vi_command_edition.s](Files/vi__command__edition_8s.md#file-vi-command-edition.s)** |\n| **[/Routines/vi_displays_info.s](Files/vi__displays__info_8s.md#file-vi-displays-info.s)** |\n| **[/Routines/vi_edition_keyboard.s](Files/vi__edition__keyboard_8s.md#file-vi-edition-keyboard.s)** |\n| **[/Routines/vi_editor_switch_off_cursor.s](Files/vi__editor__switch__off__cursor_8s.md#file-vi-editor-switch-off-cursor.s)** |\n| **[/Routines/vi_editor_switch_on_cursor.s](Files/vi__editor__switch__on__cursor_8s.md#file-vi-editor-switch-on-cursor.s)** |\n| **[/Routines/vi_fill_screen_with_empty_line.s](Files/vi__fill__screen__with__empty__line_8s.md#file-vi-fill-screen-with-empty-line.s)** |\n| **[/Routines/vi_put_char.s](Files/vi__put__char_8s.md#file-vi-put-char.s)** |\n| **[/Routines/vi_set_length_file.s](Files/vi__set__length__file_8s.md#file-vi-set-length-file.s)** |\n| **[/Routines/vi_struct.s](Files/vi__struct_8s.md#file-vi-struct.s)** |\n| **[/Routines/vi_switch_to_edition_mode.s](Files/vi__switch__to__edition__mode_8s.md#file-vi-switch-to-edition-mode.s)** |\n\n\n\n\n\n\n-------------------------------\n\nUpdated on 2022-12-15 at 14:20:17 +0100\n"
},
{
"alpha_fraction": 0.5034013390541077,
"alphanum_fraction": 0.5442177057266235,
"avg_line_length": 14.11111068725586,
"blob_id": "7ff8dfc32d69b3cb282df2d3ee346ce8fd4c922a",
"content_id": "ff20b44992f28c5d7d2e00b48612752d5ca35500",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Markdown",
"length_bytes": 147,
"license_type": "no_license",
"max_line_length": 43,
"num_lines": 9,
"path": "/kernel/docs/primitives/xscrob.md",
"repo_name": "orix-software/orix-software.github.io",
"src_encoding": "UTF-8",
"text": "# XSCROB (Scroll down)\r\n\r\nScroll all lines from the top to the bottom\r\n\r\n```ca65\r\n ldx #$01\r\n ldy #25\r\n BRK_KERNEL XSCROB\r\n``` "
},
{
"alpha_fraction": 0.5625,
"alphanum_fraction": 0.5763888955116272,
"avg_line_length": 17.200000762939453,
"blob_id": "9e375ac1dc23a63f9887716116ad7e3f50ba7d82",
"content_id": "2985f53c42c1eec7df02c49336e2f98dcd860eb3",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Markdown",
"length_bytes": 288,
"license_type": "no_license",
"max_line_length": 104,
"num_lines": 15,
"path": "/docs/kernel/primitives/xwstr0.md",
"repo_name": "orix-software/orix-software.github.io",
"src_encoding": "UTF-8",
"text": "# XWSTR0\r\n\r\nDisplays a string\r\n\r\n```ca65\r\n .include \"telestrat.inc\"\r\n lda #<str\r\n ldy #>str\r\n BRK_KERNEL XWSTR0\r\n rts\r\nstr:\r\n .asciiz \"Hello world\"\r\n```\r\n\r\n!!! tip \"See [print](../../developer_manual/orixsdk_macros/print/) macro from orix-sdk to use it easily\"\r\n"
},
{
"alpha_fraction": 0.5269461274147034,
"alphanum_fraction": 0.56886225938797,
"avg_line_length": 10.44444465637207,
"blob_id": "95c7989eb18ab0804053f5cba9cbaf457c012f4f",
"content_id": "01dfca53b1673613453001fcad955946f9f9471f",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Markdown",
"length_bytes": 334,
"license_type": "no_license",
"max_line_length": 58,
"num_lines": 27,
"path": "/kernel/docs/primitives/xmalloc.md",
"repo_name": "orix-software/orix-software.github.io",
"src_encoding": "UTF-8",
"text": "# XMALLOC (malloc)\r\n\r\n## Description\r\n\r\nAllocate memory. Use orix-sdk with macro to handle XMALLOC\r\n\r\n## Input\r\n\r\nA and Y : number of bytes\r\n\r\n## Output\r\n\r\nA&Y = $0000 : OOM or others errors\r\nelse\r\nA&Y : ptr\r\n\r\n## Usage\r\n\r\n``` ca65\r\n lda #<4000\r\n ldy #>4000\r\n\r\n BRK_TELEMON XMALLOC\r\n ; A & Y contains a ptr\r\n rts\r\n\r\n```"
},
{
"alpha_fraction": 0.6465116143226624,
"alphanum_fraction": 0.6511628031730652,
"avg_line_length": 19.774192810058594,
"blob_id": "75d9e9ad32629634835d5f0574ca372b7c629a76",
"content_id": "7c701bc8a698c5d6723e616dbca6e28d67ccd89d",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Markdown",
"length_bytes": 645,
"license_type": "no_license",
"max_line_length": 64,
"num_lines": 31,
"path": "/docs/commands/grep.md",
"repo_name": "orix-software/orix-software.github.io",
"src_encoding": "UTF-8",
"text": "# Command: grep\n\n### grep utility\n\n## SYNOPSYS\n+ grep [-ncisw] string filename\n+ grep -h\n\n## DESCRIPTION\n**grep** display all lines of a text file that contain a string.\n\n## OPTIONS\n* -h^GDisplay command syntax\n* -n^GShow line numbers\n* -c^GCount only the matching lines\n* -i^GIgnore case\n* -w^GString can use wildcards *, ?, ^ and $\n* -s^GSilent mode\n\n## EXAMPLES\n+ grep error menu.sub\n+ grep -n \"level 1\" menu.sub\n+ grep -i ERROR menu.sub\n+ grep -ni 'level 2' menu.sub\n+ grep -w '\\*lev?? 2\\*'menu.sub\n+ grep -w '^if' menu.sub\n+ grep -w 'error$' menu.sub\n+ grep -w 'if*level ??' menu.sub\n\n## SOURCE\nhttps://github.com/orix-software/grep\n\n"
},
{
"alpha_fraction": 0.7117437720298767,
"alphanum_fraction": 0.7393238544464111,
"avg_line_length": 31.08571434020996,
"blob_id": "23836a612a0ffd0f611d9d9d889101de5fde749e",
"content_id": "072e5c7781ac6d1b512ab887fcd04c27552cc31d",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Markdown",
"length_bytes": 1124,
"license_type": "no_license",
"max_line_length": 239,
"num_lines": 35,
"path": "/docs/commands/loader.md",
"repo_name": "orix-software/orix-software.github.io",
"src_encoding": "UTF-8",
"text": "# Loader\n\nUses funct + L to launch the menu or type \"twiload\" if you have Oric-1 keyboard\n\nYou can select the rom you want to start :\n\n* For atmos (usbdrive or sdcard), root path of the rom is /home/basic11\n* For Oric-1 (usbdrive or sdcard), root path of the rom is /home/basic10\n* For Pravetzt (usbdrive or sdcard), root path of the rom is /home/pravetzt\n\nand others Roms\n\n## Usage\n\n* up/left arrow : move into list\n* space : displays infos of the sofware if available\n* ESC : quit loader\n\n### Informations\n\n## Informations\n\nLoader can starts tape file as atmos .tap file by category (demo, games).\n\nDemo, games, tools, music categories displays atmos tape file and Oric-1 tapes files. It means that loader is able to start atmos software only and Oric-1 only.\n\nIn order to avoid 2 softwares for Oric-1 and another one for atmos when it's available for theses 2 machines, the choice is that loader displays atmos tape file, and if the software does not exist on the atmos, Oric-1 version is displayed.\n\nNumber of software for each category since 15/5/2022\n\n* Roms : 60\n* Demo : 35\n* Games : 788\n* Tools : 142\n* Music : 80\n\n"
},
{
"alpha_fraction": 0.7482014298439026,
"alphanum_fraction": 0.7482014298439026,
"avg_line_length": 14.44444465637207,
"blob_id": "e5b0f1bca48212eef83e07860907fde281513c27",
"content_id": "8648e7f2f947eca37ef9a4607f3d7bad021a07f5",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Markdown",
"length_bytes": 278,
"license_type": "no_license",
"max_line_length": 72,
"num_lines": 18,
"path": "/docs/commands/help.md",
"repo_name": "orix-software/orix-software.github.io",
"src_encoding": "UTF-8",
"text": "# help\n\n## Introduction\n\nDisplay commands\n\n## SYNOPSYS\n\n+ help\n\n## DESCRIPTION\n\n- No parameter : Displays all internals shell command\n- bX parameter : Displays all internals commands in bank X\n\n## SOURCE\n\nhttps://github.com/orix-software/shell/blob/master/src/commands/help.asm\n"
},
{
"alpha_fraction": 0.7226791977882385,
"alphanum_fraction": 0.7344300746917725,
"avg_line_length": 33.040000915527344,
"blob_id": "44da3621456e9bd5586119932179121d5777a390",
"content_id": "1a8b354d3c05dcc5ce58d67bafe9c2d672da4624",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Markdown",
"length_bytes": 851,
"license_type": "no_license",
"max_line_length": 174,
"num_lines": 25,
"path": "/docs/tools/mount.md",
"repo_name": "orix-software/orix-software.github.io",
"src_encoding": "UTF-8",
"text": "# Mount (not developed yet)\n\n## FP\nEach FP has a struct with with string, flag and others 3 bytes :\n\nextbank (1 byte)\nvectorbank (2 bytes)\n\nWhen a fopen is done, if the file is not found on current device (ch376), before it returns null, it tries to find EXTFP string in bank\n\nIf it's found, EXTFP is executed (not forked) and it returns a A Y vector to manage this FP (if it manage this path). If it does not manage this path, it return null.\n\n## When a FREAD/FWRITE is done, if extbank is different to 0, then, Kernel will launch external vector with A and Y contains the string of the path, and X contains the action\n\nX=1 : FREAD\nX=2 : FWRITE\nX=3 : XCLOSE\n\n## ls case\n\nls should do a FOPEN call. It should return a fp. When we need to displays the external path, we need to know which command we need to launch to kernel\n\n## cp/mv case\n\ndon't know\n"
},
{
"alpha_fraction": 0.2735042870044708,
"alphanum_fraction": 0.4273504316806793,
"avg_line_length": 6.3125,
"blob_id": "6a4271840d84b74568dde2e9ab39fd09a177e416",
"content_id": "9e9d56fd781d153ccfb1db4a8f1068fee61a1c54",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Markdown",
"length_bytes": 117,
"license_type": "no_license",
"max_line_length": 39,
"num_lines": 16,
"path": "/doxygen/doxybook_output/index_examples.md",
"repo_name": "orix-software/orix-software.github.io",
"src_encoding": "UTF-8",
"text": "---\ntitle: Examples\n\n---\n\n# Examples\n\n\n\n\n\n\n\n-------------------------------\n\nUpdated on 2022-12-15 at 11:48:27 +0100\n"
},
{
"alpha_fraction": 0.48750001192092896,
"alphanum_fraction": 0.5375000238418579,
"avg_line_length": 19,
"blob_id": "07b0b32f425dc4557a628d8ab100697cf3e5ac40",
"content_id": "bcc0716d3d0bca08d7170f1cb1b4a244eb4744e7",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "JavaScript",
"length_bytes": 80,
"license_type": "no_license",
"max_line_length": 58,
"num_lines": 4,
"path": "/doxygen/doc/html/search/all_5.js",
"repo_name": "orix-software/orix-software.github.io",
"src_encoding": "UTF-8",
"text": "var searchData=\n[\n ['tables_2es_8',['tables.s',['../tables_8s.html',1,'']]]\n];\n"
},
{
"alpha_fraction": 0.48033708333969116,
"alphanum_fraction": 0.584269642829895,
"avg_line_length": 14.47826099395752,
"blob_id": "3fea8461aa809d864d7e0b3e0e516febf215f991",
"content_id": "24de46d48aa6ea00ad0656cfddc364bc41a61429",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Markdown",
"length_bytes": 356,
"license_type": "no_license",
"max_line_length": 164,
"num_lines": 23,
"path": "/doxygen/doxybook_output_vi/Files/dir_8a0a2fbb0e248d2b08adec17bb698d4e.md",
"repo_name": "orix-software/orix-software.github.io",
"src_encoding": "UTF-8",
"text": "---\ntitle: /Routines/subfunc\n\n---\n\n# /Routines/subfunc\n\n\n\n## Directories\n\n| Name |\n| -------------- |\n| **[/Routines/subfunc/vi](Files/dir_a5544c2bf0b70f8d417c4d3bfea04409.md#dir-/mnt/c/users/plifp/onedrive/oric/projets/orix-software/vi/src/Routines/subfunc/vi)** |\n\n\n\n\n\n\n-------------------------------\n\nUpdated on 2022-12-15 at 14:20:17 +0100\n"
},
{
"alpha_fraction": 0.49979498982429504,
"alphanum_fraction": 0.5662156343460083,
"avg_line_length": 28.462499618530273,
"blob_id": "f0ecc200ba4e7a3d5f176f6868e2042187ae92e8",
"content_id": "bd922b46ff1fb262731cbb31acf0937d71ef089e",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Markdown",
"length_bytes": 2439,
"license_type": "no_license",
"max_line_length": 163,
"num_lines": 80,
"path": "/docs/developer_manual/header.md",
"repo_name": "orix-software/orix-software.github.io",
"src_encoding": "UTF-8",
"text": "# Header\r\n\r\n## Orix binary v2 format (relocated : bitfield table, available since Kernel 2022.4)\r\n\r\n``` ca65\r\n .byt $01,$00 ; non-C64 marker like o65 format\r\n .byt \"o\", \"r\", \"i\" ; \"ori\" MAGIC number like o65 format\r\n .byt $02 ; version of this header\r\ncpu_mode:\r\n .byt $00 ; CPU see below for description\r\n;7\r\n .byt <SIZE_BITFIELD,>SIZE_BITFIELD ; size of the bitfield map low\r\n;9\r\n .byt $00 ; reserved\r\n;10\r\n .byt $00 ; reserved\r\n;11\r\n .byt $00 ; reserved\r\n;12\r\n .byt <OFFSET_BITFIELD,>OFFSET_BITFIELD ; offset of the bitfield\r\n;14\r\n .byt <start_adress,>start_adress ; loading adress\r\n;16\r\n .byt <endofmemory,>EndOfMemory ; end of loading adress\r\n; 18\r\n .byt <start_adress,>start_adress ; Execution adress\r\n; end of header\r\nstart_adress:\r\n lda $00\r\n rts\r\nEndOfMemory:\r\n```\r\n\r\n## Orix binary v1 format (Will be deprecated)\r\n\r\nOrix binary v1 is not a relocated format. The binary must be never under $800. Relocation format (Format 2 see below) is now provided in kernel v2022.4\r\n\r\nYou don't need to know this format except if you use others assembler than ca65 (or cc65 with C). Orix-sdk provide .cfg file to add header to your code in assembly\r\n\r\n``` ca65\r\n .byt $01,$00 ; non-C64 marker like o65 format\r\n .byt \"o\", \"r\", \"i\" ; \"ori\" MAGIC number :$6f, $36, $35 like o65 format\r\n .byt $01 ; version of this header (can not be relocated)\r\ncpu_mode:\r\n .byt $00 ; CPU see below for description\r\n .byt $00 ; reserved\r\n .byt $00 ; reserved\r\n .byt $00 ; reserved\r\n .byt $00 ; reserved\r\n .byt $00 ; reserved\r\n .byt $00 ; reserved\r\n .byt $00 ; reserved\r\n .byt <start_adress,>start_adress ; loading adress\r\n .byt <endofmemory,>EndOfMemory ; end of loading adress\r\n .byt <start_adress,>start_adress ; starting adress\r\n\r\nstart_adress:\r\n *=$1000\r\n lda #$41\r\n sta $bb80\r\n rts\r\nEndOfMemory:\r\n```\r\n\r\n## Description\r\n\r\ncpu_mode\r\n For 6502 rockwell (oric version no illegals opcodes)\r\n\r\n.byt %00000000 ; 6502 (bit 0 = 0)\r\n\r\n For 65C02 WDC\r\n\r\n.byt %00000001 ; 65c02 (bit 0 = 1)\r\n\r\n For 65C816 WDC\r\n\r\n.byt %00000010 ; 65C816 (bit 1 = 1)\r\n\r\n For 6502 with illegal opcodes (rockwell)\r\n\r\n"
},
{
"alpha_fraction": 0.7594936490058899,
"alphanum_fraction": 0.7594936490058899,
"avg_line_length": 37.5,
"blob_id": "646d4a515f5bf71a6760da7341696dce1b968ffa",
"content_id": "ae26865ee24796f40c88b0154eef8c4c6b8cc2f5",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Markdown",
"length_bytes": 158,
"license_type": "no_license",
"max_line_length": 74,
"num_lines": 4,
"path": "/docs/developer_manual/relocbin.md",
"repo_name": "orix-software/orix-software.github.io",
"src_encoding": "UTF-8",
"text": "# Relocation format\r\n\r\nIf you want to generate a relocatable format for orix, follow this link : \r\nhttps://github.com/assinie/orix-sdk/blob/master/README.md\r\n"
},
{
"alpha_fraction": 0.603740930557251,
"alphanum_fraction": 0.6394180655479431,
"avg_line_length": 27.30964469909668,
"blob_id": "d79a026dafe1eb37b7c7c771752dc04c56aa1ede",
"content_id": "dd555a758b24d39243cfb6cdadf55bdadb56f82f",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Markdown",
"length_bytes": 5807,
"license_type": "no_license",
"max_line_length": 92,
"num_lines": 197,
"path": "/pandoc/parts/pizero.md",
"repo_name": "orix-software/orix-software.github.io",
"src_encoding": "UTF-8",
"text": "\\newpage\r\n\r\n[]{#anchor-65}MONITOR\r\n=====================\r\n\r\n[]{#anchor-66}Usage\r\n-------------------\r\n\r\nMonitor is a rom which can displays a monitor. It's teleass without\r\nassembler part.\r\n\r\n\\newpage\r\n\r\n[]{#anchor-67}ORIXCFG\r\n=====================\r\n\r\n[]{#anchor-68}Update kernel, shell : orixcfg\r\n--------------------------------------------\r\n\r\nWhen a new released is done, you can update the eeprom with the new\r\nkernel and new roms.\r\n\r\nIf you want to know if you need to update the kernel, you can compare\r\nyour current version, and the last release version. You can go to\r\n[http://orix.oric.org](http://orix.oric.org/) You need to have a look to\r\nthis release below :\r\n\r\n{width=\"17cm\" height=\"9.888cm\"}\r\n\r\nIf on your Oric screen, it's not the same value, you can update it. The\r\nsequence of the Orix release is Year.X. There is 4 releases a year, and\r\neach one must be done until you reach the final one, in order to avoid\r\nsome case. If your version is v2020.3 and the last available version is\r\nv2021.4. You need to update to v2020.4, then v2021.1, v2021.2, v2021.3,\r\nv2021.4, v2022.1, v2022.4\r\n\r\nIt's maybe possible to jump to version v2022.3, but it's at your own\r\nrisk because there is a « chance » that some kernel calls changed, and\r\norixcfg could do wrong step.\r\n\r\n[]{#anchor-69} Update kernel, shell\r\n----------------------------------\r\n\r\nWhen you need to update kernel, you can update it with orixcfg. You just\r\nneed to define set 4 on the command line. This step is very\r\n**dangerous** if you don't load the right file. There is no verification\r\nand any file on the command line will be load in the kernel set.\r\nUsually, kernel set file is named kernxxxx.r64.\r\n\r\nIf you did something wrong on this step, you won't be able to start orix\r\nagain. It means that you will need to remove eeprom from the card and\r\nprogram it with a eeprom programmer\r\n\r\nThis next command will load kernel.r64 to kernel set. Please wait until\r\nOrix reboots. If you have a kernel 2020.3 and you need to load a kernel\r\n2021,1, you will need to load previous kernel set before the update of\r\n2021.1.\r\n\r\n.r64 extension means that it's a 64KB set. It's usually used to define\r\nthat the file contains 4 roms of 16KB.\r\n\r\nPlease note that we provide 2 kernels version. One named\r\n« kernelsd.r64 » which means that the default device will be sdcard, and\r\nthe other one « kernelus.r64 » which means that default device will be\r\n« usb » (usbkey). If you load the wrong kernel at this step, you can use\r\ntwil command to switch to the right device, and you can start again\r\nkernel update with the right file (kernelsd.r64 or kernelus.r64\r\ndepending of your configuration).\r\n\r\n/\\#orixcfg -r -s 4 kernelsd.r64\r\n\r\n[]{#anchor-70}Load a ROM into a ram slot\r\n----------------------------------------\r\n\r\nSpace between values and switches are not optionnal, orixcfg needs\r\ntheses spaces\r\n\r\n/\\#orixcfg -b XX -l myrom.rom\r\n\r\nThis command will load myrom.rom (in the current path), in RAM bank XX\r\n\r\nOlder usage as : orixcfg -r -s X -b Y myrom.rom is no longer included in\r\norixcfg since orixcfg v2021.3\r\n\r\n[]{#anchor-71}Load a set of ROM into ROM slot\r\n---------------------------------------------\r\n\r\n/\\#orixcfg -r -s 0 myrom.r64\r\n\r\n[]{#anchor-72}This command will load myrom.r64 (in the current path), in\r\nset 0. For instance, you can not load one bank, you need to load 64KB\r\nset.\r\n\r\n[]{#anchor-73}Clear bank ram or initialize it\r\n---------------------------------------------\r\n\r\nRam bank are not initialized when the board is tested. If you have\r\ngarbage on screen when you uses bank (after you used twil -w). You have\r\nto clear all ram bank (ram bank are battery saved).\r\n\r\nIf you want to clear bank 4 of the set 0, you can do this command. You\r\nneed to do this command for each bank of each set. For instance, there\r\nis no switch to clear all the ram with one command.\r\n\r\n/\\#orixcfg -w -s 0 -b 4 -c\r\n\r\n[]{#anchor-74}Flush all ram bank\r\n--------------------------------\r\n\r\n/\\#orixcfg -w -f\r\n\r\n\\newpage\r\n\r\n\r\n[]{#anchor-75}OSID MUSIC\r\n========================\r\n\r\n[]{#anchor-76}How to play osid music ?\r\n--------------------------------------\r\n\r\nYou need to check if you have twilighte board firmware 2 :\r\n\r\n/\\#twil -f\r\n\r\nIf it returns 2 or greater, you can download some osid files :\r\n\r\n<https://www.oric.org/software/osid_music-2534.html>\r\n\r\nPlace all .tap files in /home/basic11\r\n\r\nAnd launch :\r\n\r\n/\\#basic11\r\n\r\nLoad patch to avoid to load sedoric routines (in basic command line)\r\n\r\nCLOAD«OSID\r\n\r\nAnd then load the osid file you want :\r\n\r\nCLOAD«OSNEVER\r\n\r\n\\newpage\r\n\r\n[]{#anchor-77}PWD\r\n=================\r\n\r\n[]{#anchor-78}Introduction\r\n--------------------------\r\n\r\nDisplays current PWD\r\n\r\n\\newpage\r\n\r\n[]{#anchor-79}SHA1\r\n==================\r\n\r\n[]{#anchor-80}Usage\r\n-------------------\r\n\r\nSha1 is a tool to displays a string into sha1 encoding\r\n\r\n\\newpage\r\n\r\n[]{#anchor-81}STORMLORD\r\n=======================\r\n\r\n[]{#anchor-82}Introduction\r\n--------------------------\r\n\r\nStormlord is Stormlord game port to Orix. You can use joysticks to plays\r\nto this game.\r\n\r\n[]{#anchor-83}Only one joystick port is working on this version\r\n===============================================================\r\n\r\n\\newpage\r\n\r\n[]{#anchor-84}SYSTEMD\r\n=====================\r\n\r\nSystemd is a rom which can load another ROM in ram slot. When you type\r\nsystemd, it will reads * /etc/systemd/banks and will load rom declared\r\nin this file sequencialy. It means that the first rom will be load in\r\nbank id 33, the second one in bank id 34.*\r\n\r\nThis roms can be used in a eeprom bank, you can load it with orixcfg\r\n\r\nYou can set roms in *« /etc/systemd/banks » as : *\r\n\r\n*\\[MYROMNAME1\\]*\r\n\r\n*path=/usr/share/rom/my.rom*\r\n\r\n*\\[MYROMNAME2\\]*\r\n\r\npath=/usr/share/rom/my2.rom\r\n"
},
{
"alpha_fraction": 0.5803571343421936,
"alphanum_fraction": 0.5892857313156128,
"avg_line_length": 13.272727012634277,
"blob_id": "4bd7cbd91482bf51d055bdeb8506e34c92286cdb",
"content_id": "e3bbf946bd04f096bf587e71f3747a91036732b8",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Markdown",
"length_bytes": 336,
"license_type": "no_license",
"max_line_length": 57,
"num_lines": 22,
"path": "/docs/developer_manual/orixsdk_macros/mfree.md",
"repo_name": "orix-software/orix-software.github.io",
"src_encoding": "UTF-8",
"text": "# Mfree (free pointer)\r\n\r\n## Description\r\n\r\nPerforms a mfree memory\r\n\r\n## usage\r\n\r\nmfree (ptr)\r\n\r\n## Example\r\n\r\n```ca65\r\n .include \"telestrat.inc\"\r\n .include \"../orix-sdk/macros/SDK_memory.mac\"\r\n\r\n ; mymalloc is a 2 bytes zp ptr\r\n mfree (mymalloc)\r\n rts\r\n```\r\n\r\nCall [XFREE](../../../kernel/primitives/xfree/) function.\r\n"
},
{
"alpha_fraction": 0.6898102164268494,
"alphanum_fraction": 0.6973026990890503,
"avg_line_length": 25.342105865478516,
"blob_id": "bceb93e42888d64db8fc875b3d0d5c014c8bf05e",
"content_id": "a63fa3138912fdbf935870ccf44a9d4c7a72aba2",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Markdown",
"length_bytes": 2002,
"license_type": "no_license",
"max_line_length": 91,
"num_lines": 76,
"path": "/docs/kernel/primitives/index.md",
"repo_name": "orix-software/orix-software.github.io",
"src_encoding": "UTF-8",
"text": "# Summary\n\nKernel calls are done with brk VALUE.\n\nSome parameter are set into A,X,Y and some others ones needs to be set in Kernel zp memory.\n\nOrix-sdk must be used in order to manage in the right way kernel calls\n\n```markmap\n\n## Args from commandline\n\n* [XMAINARGS](xmainargs) : get command line and build argv/argc\n* [XGETARGV](xgetargv) : get an argv from xmainargs struct\n\n## Numbers\n\n* [XBINDX](xbindx) : convert a number to decimal\n* [XHEXA](xhexa) : convert a number to hexadecimal and printit\n* [XDECIM](xdecim) : display a number to the output\n* [XDECAY](xdecay) : Convert a string into a number (16 bits)\n\n## Memory\n\n* [XMALLOC](xmalloc) : allocate memory from main memory\n* [XFREE](xfree) : free memory\n\n## Files\n\n* [XOPEN](xopen) : Open a file\n* [XWRITE](xwrite) : Write bytes to file\n* [XFREAD](xfread) : read bytes from file\n* [XCLOSE](xclose) : Close file\n* [XFSEEK](xfseek) : Seek into files\n* [XMKDIR](xmkdir) : Create a folder\n* [XGETCWD](xgetcwd) : get current path\n* [XPUTCWD](xputcwd) : change current path\n* [XOPENDIR](xopendir) : opendir\n* [XREADDIR](xreaddir) : readdir (return a struct with the files)\n* [XCLOSEDIR](xclosedir) : readdir (return a struct with the files)\n* [XRM](xrm) : remove file\n\n## Text mode\n\n* [XWR0](xwr0) : Display a char\n* [XWSTR0](xwstr0) : Display a string\n* [XCRLF](xcrlf) : Returns to the next line\n* [XSCROB](xscrob) : scroll from bottom to top\n* [XSCROH](xscroh) : scroll from top to bottom\n* [XTEXT](xtext) : switch to text mode\n\n## Graphic mode\n\n* [XHIRES](xhires) : start graphic mode\n\n## Numbers\n\n* [XBINDX](xbindx) : convert a 16 bits number into decimal\n* [XDECIM](xdecim) : displays a 16 bits number into decimal mode\n* [XHEXA](xhexa) : convert a 8 bits number into hexadecimal\n\n## Execute\n\n* [XEXEC](xexec) : Execute binary\n\n## Keyboard\n\n* [XRDW0](xrdw0) : Get a char from keyboard (wait for a key)\n* [XRD0](xrd0) : check if a char is typed on the keyboard (continue execution)\n\n## Kernel values\n\n* [XVALUES](xvalues)\n* [XVARS](xvars)\n\n```\n"
},
{
"alpha_fraction": 0.5650722980499268,
"alphanum_fraction": 0.5951056480407715,
"avg_line_length": 18.54347801208496,
"blob_id": "6a04c377c1f4cab6afbf66a1b2c0917fe2218e05",
"content_id": "d46dd7a1ce38e5b47df9ec9b81e2da8cb12272c5",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Markdown",
"length_bytes": 899,
"license_type": "no_license",
"max_line_length": 123,
"num_lines": 46,
"path": "/doxygen/doxybook_output/Files/vi__editor__switch__off__cursor_8s.md",
"repo_name": "orix-software/orix-software.github.io",
"src_encoding": "UTF-8",
"text": "---\ntitle: /mnt/c/Users/plifp/OneDrive/oric/projets/orix-software/vi/src/functions/vi_editor_switch_off_cursor.s\n\n---\n\n# /mnt/c/Users/plifp/OneDrive/oric/projets/orix-software/vi/src/functions/vi_editor_switch_off_cursor.s\n\n\n\n## Functions\n\n| | Name |\n| -------------- | -------------- |\n| | **[vi_editor_switch_off_cursor](Files/vi__editor__switch__off__cursor_8s.md#function-vi-editor-switch-off-cursor)**() |\n\n\n## Functions Documentation\n\n### function vi_editor_switch_off_cursor\n\n```cpp\nvi_editor_switch_off_cursor()\n```\n\n\n\n\n## Source code\n\n```cpp\n.proc vi_editor_switch_off_cursor\n ldy #vi_struct_data::xpos_screen\n lda (vi_struct),y\n tay\n lda (vi_ptr_screen),y ; display cursor\n and #%01111111\n sta (vi_ptr_screen),y ; display cursor\n\n rts\n.endproc\n```\n\n\n-------------------------------\n\nUpdated on 2022-12-15 at 11:48:27 +0100\n"
},
{
"alpha_fraction": 0.7868852615356445,
"alphanum_fraction": 0.7868852615356445,
"avg_line_length": 11.199999809265137,
"blob_id": "a89702f11b21cea569e39c30ccc8fdc00319a765",
"content_id": "d9c9eb607ecd1b5cdbc0ef82b18593abd94e92de",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Markdown",
"length_bytes": 61,
"license_type": "no_license",
"max_line_length": 26,
"num_lines": 5,
"path": "/docs/commands/quintes.md",
"repo_name": "orix-software/orix-software.github.io",
"src_encoding": "UTF-8",
"text": "# Quintessential demo\n\nStart quintessential demo \n\n/#quintes\n"
},
{
"alpha_fraction": 0.5520231127738953,
"alphanum_fraction": 0.5770713090896606,
"avg_line_length": 18.58490562438965,
"blob_id": "97f2ed3931fe14615598bea8f88057ebfcfa230f",
"content_id": "fb92ecee34e0a18b5307ba93915658d9a078c40a",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Markdown",
"length_bytes": 1038,
"license_type": "no_license",
"max_line_length": 116,
"num_lines": 53,
"path": "/doxygen/doxybook_output/Files/vi__ypos__screen__plus__plus_8s.md",
"repo_name": "orix-software/orix-software.github.io",
"src_encoding": "UTF-8",
"text": "---\ntitle: /mnt/c/Users/plifp/OneDrive/oric/projets/orix-software/vi/src/functions/subfunc/vi/vi_ypos_screen_plus_plus.s\n\n---\n\n# /mnt/c/Users/plifp/OneDrive/oric/projets/orix-software/vi/src/functions/subfunc/vi/vi_ypos_screen_plus_plus.s\n\n\n\n## Functions\n\n| | Name |\n| -------------- | -------------- |\n| | **[vi_ypos_screen_plus_plus](Files/vi__ypos__screen__plus__plus_8s.md#function-vi-ypos-screen-plus-plus)**() |\n\n\n## Functions Documentation\n\n### function vi_ypos_screen_plus_plus\n\n```cpp\nvi_ypos_screen_plus_plus()\n```\n\n\n\n\n## Source code\n\n```cpp\n.proc vi_ypos_screen_plus_plus\n ; ypos_screen=ypos_screen+1\n ; Returns 01 in A if we reached the last line\n ldy #vi_struct_data::ypos_screen\n lda (vi_struct),y\n cmp #VI_LAST_LINE_EDITOR\n beq @no_add\n clc\n adc #$01\n sta (vi_struct),y\n lda #$00\n rts\n@no_add:\n ;\n lda #IS_LAST_LINE_OF_SCREEN_TEXT\n rts\n.endproc\n```\n\n\n-------------------------------\n\nUpdated on 2022-12-15 at 11:48:27 +0100\n"
},
{
"alpha_fraction": 0.5968899726867676,
"alphanum_fraction": 0.619617223739624,
"avg_line_length": 20.435897827148438,
"blob_id": "4d58b09c647fc843c2ffbff28757eaf21050a7bd",
"content_id": "3cf39fcd41e89df747800256f4ad14025f7cc08e",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Markdown",
"length_bytes": 836,
"license_type": "no_license",
"max_line_length": 166,
"num_lines": 39,
"path": "/doxygen/doxybook_output/Files/vi__ptr__file__used__plus__plus__and__check__eof_8s.md",
"repo_name": "orix-software/orix-software.github.io",
"src_encoding": "UTF-8",
"text": "---\ntitle: /mnt/c/Users/plifp/OneDrive/oric/projets/orix-software/vi/src/functions/subfunc/vi/vi_ptr_file_used_plus_plus_and_check_eof.s\n\n---\n\n# /mnt/c/Users/plifp/OneDrive/oric/projets/orix-software/vi/src/functions/subfunc/vi/vi_ptr_file_used_plus_plus_and_check_eof.s\n\n\n\n## Functions\n\n| | Name |\n| -------------- | -------------- |\n| | **[vi_ptr_file_used_plus_plus_and_check_eof](Files/vi__ptr__file__used__plus__plus__and__check__eof_8s.md#function-vi-ptr-file-used-plus-plus-and-check-eof)**() |\n\n\n## Functions Documentation\n\n### function vi_ptr_file_used_plus_plus_and_check_eof\n\n```cpp\nvi_ptr_file_used_plus_plus_and_check_eof()\n```\n\n\n\n\n## Source code\n\n```cpp\n.proc vi_ptr_file_used_plus_plus_and_check_eof\n rts\n.endproc\n```\n\n\n-------------------------------\n\nUpdated on 2022-12-15 at 11:48:27 +0100\n"
},
{
"alpha_fraction": 0.7137404680252075,
"alphanum_fraction": 0.7137404680252075,
"avg_line_length": 11.476190567016602,
"blob_id": "ceedb62bc2aa32d81a431524c86e7afa7fa2508c",
"content_id": "749974357019f062581c318547885b0165c489b3",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Markdown",
"length_bytes": 262,
"license_type": "no_license",
"max_line_length": 73,
"num_lines": 21,
"path": "/docs/commands/mkdir.md",
"repo_name": "orix-software/orix-software.github.io",
"src_encoding": "UTF-8",
"text": "# mkdir\n\n## Introduction\n\nCreate a folder\n\n## SYNOPSYS\n\n+ /#mkdir PATH\n\n## DESCRIPTION\n\nCreate a folder. -p (recursive mode) option is not available\n\n## EXAMPLES\n\n+ mkdir /opt\n\n## SOURCE\n\nhttps://github.com/orix-software/shell/blob/master/src/commands/mkdir.asm\n"
},
{
"alpha_fraction": 0.5335366129875183,
"alphanum_fraction": 0.5569105744361877,
"avg_line_length": 15.965517044067383,
"blob_id": "697409d6f33ab08959c10a387eea7fe7bb120435",
"content_id": "5de4df1806e752112baa9a684bb6b7d6f349f7d1",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Markdown",
"length_bytes": 984,
"license_type": "no_license",
"max_line_length": 105,
"num_lines": 58,
"path": "/doxygen/doxybook_output/Files/vi__decal__text_8s.md",
"repo_name": "orix-software/orix-software.github.io",
"src_encoding": "UTF-8",
"text": "---\ntitle: /mnt/c/Users/plifp/OneDrive/oric/projets/orix-software/vi/src/functions/subfunc/vi/vi_decal_text.s\n\n---\n\n# /mnt/c/Users/plifp/OneDrive/oric/projets/orix-software/vi/src/functions/subfunc/vi/vi_decal_text.s\n\n\n\n## Functions\n\n| | Name |\n| -------------- | -------------- |\n| | **[vi_decal_text](Files/vi__decal__text_8s.md#function-vi-decal-text)**() |\n\n\n## Functions Documentation\n\n### function vi_decal_text\n\n```cpp\nvi_decal_text()\n```\n\n\n\n\n## Source code\n\n```cpp\n.proc vi_decal_text\n ; A & X address dest to copy\n\n sta DECCIB\n stx DECCIB+1\n\n lda vi_ptr_file_used ; address first char\n ldy vi_ptr_file_used+1\n sta DECDEB\n sty DECDEB+1\n\n ldy #vi_struct_data::ptr_last_char_file\n lda (vi_struct),y\n sta DECFIN\n iny\n lda (vi_struct),y\n sta DECFIN+1\n\n BRK_TELEMON XDECAL\n\n rts\n.endproc\n```\n\n\n-------------------------------\n\nUpdated on 2022-12-15 at 11:48:27 +0100\n"
},
{
"alpha_fraction": 0.48706239461898804,
"alphanum_fraction": 0.5235921144485474,
"avg_line_length": 13.282608985900879,
"blob_id": "082c1e27eb1dc5d72c5c7c03ef577e472a6a7461",
"content_id": "4a3684b37a6b15f538e4e93639863b42c9a657db",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Markdown",
"length_bytes": 657,
"license_type": "no_license",
"max_line_length": 105,
"num_lines": 46,
"path": "/docs/tools_docs/vi/Files/vi__length__file__sub__sub_8s.md",
"repo_name": "orix-software/orix-software.github.io",
"src_encoding": "UTF-8",
"text": "---\ntitle: vi_length_file_sub_sub.s\n\n---\n\n# vi_length_file_sub_sub.s\n\n\n\n## Routine\n\n| | Name |\n| -------------- | -------------- |\n| | **[vi_length_file_sub_sub](Files/vi__length__file__sub__sub_8s.md#Routine-vi-length-file-sub-sub)** |\n\n\n## Routine documentation\n\n### Routine vi_length_file_sub_sub\n\n```ca65\nvi_length_file_sub_sub\n```\n\n\n\n\n## Source code\n\n```ca65\n.proc vi_length_file_sub_sub\n ; add length_file=length_file--\n lda vi_length_file\n bne @out\n dec vi_length_file+1\n\n@out:\n dec vi_length_file\n rts\n.endproc\n```\n\n\n-------------------------------\n\nUpdated on 2022-12-15 at 14:20:17 +0100\n"
},
{
"alpha_fraction": 0.5187085270881653,
"alphanum_fraction": 0.5470730066299438,
"avg_line_length": 17.0108699798584,
"blob_id": "5c02741696899e5f67b9cce9016caeecfd56cb09",
"content_id": "7fca682594ee903284082b3f8a83ebed274f9ba4",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Markdown",
"length_bytes": 3314,
"license_type": "no_license",
"max_line_length": 97,
"num_lines": 184,
"path": "/doxygen/doxybook_output/Files/vi__displays__info_8s.md",
"repo_name": "orix-software/orix-software.github.io",
"src_encoding": "UTF-8",
"text": "---\ntitle: /mnt/c/Users/plifp/OneDrive/oric/projets/orix-software/vi/src/functions/vi_displays_info.s\n\n---\n\n# /mnt/c/Users/plifp/OneDrive/oric/projets/orix-software/vi/src/functions/vi_displays_info.s\n\n\n\n## Functions\n\n| | Name |\n| -------------- | -------------- |\n| | **[vi_displays_info](Files/vi__displays__info_8s.md#function-vi-displays-info)**() |\n| | **[displays_debug](Files/vi__displays__info_8s.md#function-displays-debug)**() |\n\n\n## Functions Documentation\n\n### function vi_displays_info\n\n```cpp\nvi_displays_info()\n```\n\n\n### function displays_debug\n\n```cpp\ndisplays_debug()\n```\n\n\n\n\n## Source code\n\n```cpp\n.proc vi_displays_info\n\n lda vi_fileopened\n\n beq @is_new_file\n\n\n ; Displays filename at the bottom\n lda #$22 ; \" char\n sta VI_COMMANDLINE_VIDEO_ADRESS\n\n ldy #$00\n ldx #$01\n@L1:\n lda (vi_arg1),y\n beq @end_display_filename\n sta VI_COMMANDLINE_VIDEO_ADRESS,x\n inx\n iny\n bne @L1\n@end_display_filename:\n lda #$22 ; \" char\n sta VI_COMMANDLINE_VIDEO_ADRESS,x\n rts\n\n@is_new_file:\n ldy #$00\n\n@L10:\n lda msg_nofile,y\n beq @end_display_msg_nofile\n sta VI_COMMANDLINE_VIDEO_ADRESS,y\n iny\n bne @L10\n\n@end_display_msg_nofile:\n sty vi_pos_debug\n jsr displays_debug\n\n rts\n.endproc\n\n\n.proc displays_debug\n lda #$02\n sta VI_COMMANDLINE_VIDEO_ADRESS,y\n ; y\n iny\n ; Display ptr_last_char_file offset\n sty RES\n ldy #vi_struct_data::ptr_last_char_file+1 ; last char file\n lda (vi_struct),y\n BRK_TELEMON XHEXA\n ; A and Y\n sty vi_tmp1\n ldy RES\n sta VI_COMMANDLINE_VIDEO_ADRESS,y\n iny\n lda vi_tmp1\n sta VI_COMMANDLINE_VIDEO_ADRESS,y\n iny\n\n sty RES\n ldy #vi_struct_data::ptr_last_char_file\n lda (vi_struct),y\n BRK_TELEMON XHEXA\n ; A and Y\n sty vi_tmp1\n ldy RES\n sta VI_COMMANDLINE_VIDEO_ADRESS,y\n iny\n lda vi_tmp1\n sta VI_COMMANDLINE_VIDEO_ADRESS,y\n iny\n\n lda #$02\n sta VI_COMMANDLINE_VIDEO_ADRESS,y\n\n iny\n sty RES\n ; pos_file_addr\n\n\n\n lda vi_ptr_file_used+1\n BRK_TELEMON XHEXA\n ; A and Y\n sty vi_tmp1\n ldy RES\n sta VI_COMMANDLINE_VIDEO_ADRESS,y\n iny\n lda vi_tmp1\n sta VI_COMMANDLINE_VIDEO_ADRESS,y\n\n iny\n sty RES\n ; pos_file_addr\n\n lda vi_ptr_file_used\n BRK_TELEMON XHEXA\n ; A and Y\n sty vi_tmp1\n ldy RES\n sta VI_COMMANDLINE_VIDEO_ADRESS,y\n iny\n lda vi_tmp1\n sta VI_COMMANDLINE_VIDEO_ADRESS,y\n\n\n ; Display length\n lda #$06\n sta VI_COMMANDLINE_VIDEO_ADRESS+22\n\n lda #<($bb80+40*27+23)\n sta TR5\n lda #>($bb80+40*27+23)\n sta TR5+1\n\n lda #$20\n sta DEFAFF\n\n ldx #$01\n ldy vi_length_file+1\n lda vi_length_file\n BRK_TELEMON XBINDX\n\n lda #$05\n sta VI_COMMANDLINE_VIDEO_ADRESS+28\n\n ; Display char under the cursor\n ldy #$00\n lda (vi_ptr_file_used),y\n cmp #$0D\n bne @notcrlf\n lda #'$'\n@notcrlf:\n sta $bb80+40*27+29\n\n rts\n.endproc\n```\n\n\n-------------------------------\n\nUpdated on 2022-12-15 at 11:48:27 +0100\n"
},
{
"alpha_fraction": 0.6433120965957642,
"alphanum_fraction": 0.6496815085411072,
"avg_line_length": 12.272727012634277,
"blob_id": "4a9ef3e55e9d79e1defc0ff12f1d752439fcfb93",
"content_id": "9a590cc2b2cf9c3f6840becdd25fbac4ca427f7b",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Markdown",
"length_bytes": 157,
"license_type": "no_license",
"max_line_length": 28,
"num_lines": 11,
"path": "/docs/kernel/xvalues.md",
"repo_name": "orix-software/orix-software.github.io",
"src_encoding": "UTF-8",
"text": "# XVARS\r\n\r\nGet vars from kernel\r\n\r\n## Get Os string (Orix)\r\n\r\nXVARS_KERNEL_OSNAME = 9\r\n\r\nldx #XVARS_KERNEL_OSNAME\r\nBRK_KERNEL XVARS\r\n; A and X contains\r\n"
},
{
"alpha_fraction": 0.6153377890586853,
"alphanum_fraction": 0.6363359689712524,
"avg_line_length": 17.559524536132812,
"blob_id": "7d7ae372771b3a2fae18c42aeaf9a7616d756155",
"content_id": "028d31b0004af3a188d745107282a266fb4400b5",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Markdown",
"length_bytes": 3286,
"license_type": "no_license",
"max_line_length": 96,
"num_lines": 168,
"path": "/docs/kernel/primitives/xvalues.md",
"repo_name": "orix-software/orix-software.github.io",
"src_encoding": "UTF-8",
"text": "# XVALUES ROUTINE\r\n\r\nGet vars from kernel\r\n\r\n## Get Free malloc table\r\n\r\nX = KERNEL_XVALUES_FREE_MALLOC_TABLE\r\nKERNEL_XVALUES_FREE_MALLOC_TABLE = $02\r\n\r\nReturns in A and Y a copy of free malloc table. This ptr must be free when the buffer is useless\r\n\r\n### Example\r\n\r\n```ca65\r\n .include \"telestrat.inc\"\r\n\r\n KERNEL_XVALUES_FREE_MALLOC_TABLE = $02\r\n\r\n ldx #KERNEL_XVALUES_FREE_MALLOC_TABLE\r\n\r\n BRK_TELEMON $2D ; XVALUES\r\n\r\n rts\r\n```\r\n\r\n## Get Busy malloc table\r\n\r\nX = KERNEL_XVALUES_BUSY_MALLOC_TABLE\r\nKERNEL_XVALUES_FREE_MALLOC_TABLE = $07\r\n\r\nReturns in A and Y a copy of busy malloc table. This ptr must be free when the buffer is useless\r\n\r\n```ca65\r\n .include \"telestrat.inc\"\r\n\r\n KERNEL_XVALUES_FREE_MALLOC_TABLE = $07\r\n\r\n ldx #KERNEL_XVALUES_BUSY_MALLOC_TABLE\r\n\r\n BRK_TELEMON $2D ; XVALUES\r\n\r\n rts\r\n```\r\n\r\n## Get processName of the current chunk\r\n\r\nY contains the id of the malloc busy table\r\n\r\nY = the pid\r\nX = 08\r\n\r\nIt returns in A & Y the ptr of the process name.\r\n\r\nKERNEL_XVALUES_GET_CURRENT_PROCESSNAME_FROM_PID = $08\r\n\r\n```ca65\r\n .include \"telestrat.inc\"\r\n\r\n KERNEL_XVALUES_GET_CURRENT_PROCESSNAME_FROM_PID = $07\r\n\r\n ldx #KERNEL_XVALUES_GET_CURRENT_PROCESSNAME_FROM_PID\r\n ldy #03 ; Get the name of pid = 3\r\n\r\n BRK_TELEMON $2D ; XVALUES\r\n\r\n rts\r\n```\r\n\r\n## Get the path of an opened file\r\n\r\nKERNEL_XVALUES_PATH_FROM_FD = $09\r\nY must contains the fd.\r\n\r\n```ca65\r\n .include \"telestrat.inc\"\r\n\r\n KERNEL_XVALUES_PATH_FROM_FD = $09\r\n\r\n ldx #KERNEL_XVALUES_PATH_FROM_FD\r\n ldy #03 ; Get the name of pid = 3\r\n\r\n BRK_TELEMON $2D ; XVALUES\r\n ; A and Y contains the ptr of path\r\n\r\n rts\r\n```\r\n\r\n## Get the position in the opened file\r\n\r\nKERNEL_XVALUES_GET_FTELL_FROM_FD = $0A\r\n\r\nX=KERNEL_XVALUES_GET_FTELL_FROM_FD\r\n\r\nIt returns in A, X, Y and RES, the position in the file\r\n\r\n## Get the ptr of the pid list\r\n\r\nKERNEL_XVALUES_GET_PROCESS_ID_LIST=$0C\r\n\r\n; A and Y contains the ptr\r\n\r\n## Get the processname with the PID\r\n\r\nA = contains the POD\r\n\r\nX = KERNEL_XVALUES_GET_PROCESS_NAME_WITH_PID\r\n\r\nA & Y = contains the ptr of processname\r\n\r\n## Get the number of malloc available in the kernel\r\n\r\nX = 5\r\n\r\nReturns in A the number of the malloc\r\n\r\n## Get the magic number of the kernel\r\n\r\nX = 6\r\n\r\nReturns in A the magic number\r\n\r\n## Get the max process configured in the kernel\r\n\r\nX = 8\r\n\r\nOutput :\r\n\r\nA contains the max process\r\n\r\n## Get Os string (Orix)\r\n\r\n```ca65\r\n .include \"telestrat.inc\"\r\n\r\n XVARS_KERNEL_OSNAME = 9\r\n\r\n ldx #XVARS_KERNEL_OSNAME\r\n BRK_TELEMON XVARS\r\n ; A and X contains the ptr to osname\r\n rts\r\n```\r\n\r\n!!! warning \"It will be available in Kernel v2023.2\"\r\n\r\n## Get an empty bank\r\n\r\n```ca65\r\n .include \"telestrat.inc\"\r\n\r\n KERNEL_XVALUES_GET_FREE_BANK = $10\r\n\r\n ldx #KERNEL_XVALUES_GET_FREE_BANK\r\n ldy #$00 ; RAM type\r\n BRK_TELEMON $2D ; XVALUES\r\n ; Y contains the id of the bank\r\n ; X contains set ($343 register)\r\n ; A the bank ($321 register)\r\n ; If there is no available bank, Y=0\r\n rts\r\n```\r\n\r\n!!! warning \"It will be available in Kernel v2023.2\"\r\n\r\n!!! warning \"It can only allocate the first 8 banks\"\r\n\r\n!!! warning \"It does not verify the content of any bank\"\r\n\r\n!!! warning \"The content is reset every reboot\"\r\n"
},
{
"alpha_fraction": 0.7149849534034729,
"alphanum_fraction": 0.7375752925872803,
"avg_line_length": 35.356163024902344,
"blob_id": "329a631539f10ad4fa2dbac0aaeb463d8a0d217d",
"content_id": "7c5787332c5234d1c8ad74da1a69b265e7c33a1c",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Markdown",
"length_bytes": 2696,
"license_type": "no_license",
"max_line_length": 216,
"num_lines": 73,
"path": "/docs/developer_manual/basic11_informations.md",
"repo_name": "orix-software/orix-software.github.io",
"src_encoding": "UTF-8",
"text": "# basic11 rom\n\nDans tous les cas, les valeurs de RND en $FA ont été copiées en avance\n\n## Si on tape : /#basic11\n\nCela lance la ROM\n\n## Si on tape : /#basic11 \"TAPEFILE\"\n\n* Cela va chercher la présence d'un fichier db dans /var/cache/basic11/[FIRSTLETTEROFTAPEFILE]/TAPEFILE.db\n\n* Si TAPEFILE.db est présent, il va lire les 1er octets du fichiers pour poker en $F1 les I/O des joysticks, sinon il va démarrer la banque 6 qui est la ROM par défaut qui contient le path courant : /home/basic11/\n\n* Le code va regarder l'id de la ROM (stockée en $F2 suite au load des confs au point précédent)\n\n* Cela va détecter aussi si le device par défaut est la sdcard ou la clé usb\n\n* Le code va concaténer /usr/share/basic11/basic avec le device courant (us ou sd) puis l'id de la rom converti en ascii\n\n* La rom va être chargée en ram principale puis basic11 va copier un driver en ram principale. Si la rom demandée n'est pas trouvée, le programme s'arrête avec un message d'erreur.\n\n* Ce driver passe en RAM 0 (banque 0) copie la ROM chargée en ram principale, puis va patcher le path par défaut pour accéder au .tap demandé. Ainsi, la ROM serait présente en RAM 0, avec le path par défaut tel que :\n'/usr/share/basic11/3/' pour 3dfongus\n\nAinsi, la ROM a en argument 3Dfongus, et la rom charge donc à partir de là.\n\nLe code gère un maximum de 9 roms en mode usb, et 9 en rom sd. Un soft défini avec une ROM 10 ne fonctionnera pas, c'est à dire que la rom ne sera pas chargée, et un message d'erreur dira qu'il ne trouve pas la rom.\n\n## Si on tape : /#basic11 -l\n\nCela va ouvrir le fichier /var/cache/basic11/basic11.db\n\nEt cela va lire la clé du .tap et le nom du soft tronqué à 29 chars.\n\nL'espace et le ctrl+c fonctionne ici\n\n## Fonctionnement global\n\nSi la rom est lancée sans .tap ou avec un .tap, systématiquement, les valeurs de RND seront écrites en dur en RAM de $FA à $FF. Cela interfera à terme avec la ROM basic oric-1\n\n# Format du fichier \"maindb\" (basic11.db)\n\n## Version 1\n\n* 1er octet : numero de version du fichier db (actuel : 1)\n\n* puis la liste des noms de fichiers puis le titre tels que :\nfilenametap8bytesLength ; name_software '\\0'\n\n* en dernier octet à la fin du fichier, nous avons l'octet $ff qui signale la fin du fichier (ceci permettant de simplifier le code de lecture)\n\n\n\n# Format du fichier .db d'un soft\n\n* version_bin : 1 byte (binary)\n\n* rombasic11 : 1 byte, id of the rom\n\n* fire2_joy : keyboard fire2 matrix\n\n* fire3_joy : keyboard fire2 matrix\n\n* down_joy : keyboard fire2 matrix\n\n* right_joy : keyboard fire2 matrix\n\n* left_joy : keyboard fire2 matrix\n\n* fire1_joy : keyboard fire2 matrix\n\n* up_joy : keyboard fire2 matrix\n\n\n"
},
{
"alpha_fraction": 0.5925925970077515,
"alphanum_fraction": 0.6172839403152466,
"avg_line_length": 15.71875,
"blob_id": "982194f044b619ee6dbdc93e23240e06f2a0563b",
"content_id": "c2276615cc594325888b26313a0cd8acf63bdd16",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Markdown",
"length_bytes": 567,
"license_type": "no_license",
"max_line_length": 109,
"num_lines": 32,
"path": "/docs/kernel/primitives/xmalloc.md",
"repo_name": "orix-software/orix-software.github.io",
"src_encoding": "UTF-8",
"text": "# XMALLOC (malloc)\r\n\r\n## Description\r\n\r\nAllocate memory. Use orix-sdk with macro to handle XMALLOC\r\n\r\n## Input\r\n\r\nA and Y : number of bytes\r\n\r\n## Output\r\n\r\nA&Y = $0000 : OOM or others errors\r\nelse\r\nA&Y : ptr\r\n\r\n## Example\r\n\r\n``` ca65\r\n .include \"telestrat.inc\"\r\n\r\n lda #<4000\r\n ldy #>4000\r\n\r\n BRK_TELEMON XMALLOC\r\n ; A & Y contains a ptr\r\n rts\r\n```\r\n\r\n!!! tip \"See [malloc](../../../developer_manual/orixsdk_macros/malloc) macro from orix-sdk to use it easily \"\r\n\r\n!!! warning \"Number of malloc are limited in kernel side for all programs running\"\r\n"
},
{
"alpha_fraction": 0.6666666865348816,
"alphanum_fraction": 0.6666666865348816,
"avg_line_length": 6.666666507720947,
"blob_id": "176e35319910540328ff723f70d5a74bd88ca75f",
"content_id": "dca13be73d567000dee6b4e7e4a47ca69d56bb05",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Markdown",
"length_bytes": 24,
"license_type": "no_license",
"max_line_length": 14,
"num_lines": 3,
"path": "/docs/kernel/primitives/xzap.md",
"repo_name": "orix-software/orix-software.github.io",
"src_encoding": "UTF-8",
"text": "# XZAP\r\n\r\nSend zap sound"
},
{
"alpha_fraction": 0.48685258626937866,
"alphanum_fraction": 0.5091633200645447,
"avg_line_length": 20.092437744140625,
"blob_id": "c36e2128caf9685b52231ff26877868bec052842",
"content_id": "8831de95b2159a99492b1e37e36f03bcd21c77a2",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Markdown",
"length_bytes": 2510,
"license_type": "no_license",
"max_line_length": 111,
"num_lines": 119,
"path": "/doxygen/doxybook_output/Files/vi__search__next__line_8s.md",
"repo_name": "orix-software/orix-software.github.io",
"src_encoding": "UTF-8",
"text": "---\ntitle: /mnt/c/Users/plifp/OneDrive/oric/projets/orix-software/vi/src/functions/subfunc/vi/vi_search_next_line.s\n\n---\n\n# /mnt/c/Users/plifp/OneDrive/oric/projets/orix-software/vi/src/functions/subfunc/vi/vi_search_next_line.s\n\n\n\n## Functions\n\n| | Name |\n| -------------- | -------------- |\n| | **[vi_search_next_line](Files/vi__search__next__line_8s.md#function-vi-search-next-line)**() |\n\n\n## Functions Documentation\n\n### function vi_search_next_line\n\n```cpp\nvi_search_next_line()\n```\n\n\n\n\n## Source code\n\n```cpp\n.proc vi_search_next_line\n ; vi_ptr_file_used is on $0D\n ; and the exit of the subroutine must be :\n ; -last char\n\n lda #$00\n sta vi_tmp2 ; Contains the number of line when max number of_column has reached\n\n\n ; jsr vi_check_eof\n ; cmp #IS_EOF\n ; beq @eof\n\n ; ; The current line is empty : *$0D*, $0A, 'T',$0D, 0A\n ; ; The current line is chars : *$0D*, $0A, 'T',$0D, 0A\n ; ; The current line is the end of the file : *$0D*, $0A\n ; ; The current line is the end of the file : *'T'*\n ; jsr vi_ptr_file_used_plus_plus\n\n ; ; The current line is empty : $0D, *$0A*, 'T',$0D, 0A\n ; ; The current line is chars : $0D, *$0A*, 'T',$0D, 0A\n ; ; The current line is the end of the file : $0D, *$0A*\n ; ; The current line is the end of the file : 'T' ** err\n\n ; jsr vi_check_eof\n ; cmp #IS_EOF\n ; beq @eof\n\n\n ; ldy #$00\n ; lda (vi_ptr_file_used),y\n ; cmp #LF\n ; bne @continue\n\n ; jsr vi_ptr_file_used_plus_plus\n\n ; jsr vi_check_eof\n ; cmp #IS_EOF\n ; beq @eof\n\n ldx #$00\n@continue:\n ldy #$00\n lda (vi_ptr_file_used),y\n cmp #CR\n beq @first_char_found\n\n jsr vi_ptr_file_used_plus_plus\n jsr vi_check_eof\n cmp #IS_EOF\n beq @eof\n inx\n cpx #VI_EDITOR_MAX_COLUMN\n bne @continue\n inc vi_tmp2\n ldx #$00\n\n jmp @continue\n\n\n@eof:\n ; A=IS_EOF\n rts\n@first_char_found:\n jsr vi_ptr_file_used_plus_plus\n jsr vi_check_eof\n cmp #IS_EOF\n beq @eof\n\n ldy #$00\n lda (vi_ptr_file_used),y\n cmp #LF\n bne @exit\n\n jsr vi_ptr_file_used_plus_plus\n jsr vi_check_eof\n cmp #IS_EOF\n beq @eof\n\n@exit:\n lda #$01\n rts\n.endproc\n```\n\n\n-------------------------------\n\nUpdated on 2022-12-15 at 11:48:27 +0100\n"
},
{
"alpha_fraction": 0.5886792540550232,
"alphanum_fraction": 0.6037735939025879,
"avg_line_length": 14.5625,
"blob_id": "bbfbd9542fa78058e7e192abb2613be210cbbd51",
"content_id": "b00217c12e9051db9930a3e44baeddc951b701c4",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Markdown",
"length_bytes": 265,
"license_type": "no_license",
"max_line_length": 104,
"num_lines": 16,
"path": "/docs/kernel/primitives/xtext.md",
"repo_name": "orix-software/orix-software.github.io",
"src_encoding": "UTF-8",
"text": "# XTEXT\r\n\r\n## Description\r\n\r\nSwitch to text mode\r\n\r\n## Example\r\n\r\n``` ca65\r\n .include \"telestrat.inc\"\r\n ldx #$00\r\n BRK_TELEMON XTEXT\r\n rts\r\n```\r\n\r\n!!! tip \"See [setscreen](../../developer_manual/orixsdk_macros/setscreen) macro from orix-sdk to use it\"\r\n"
},
{
"alpha_fraction": 0.708737850189209,
"alphanum_fraction": 0.708737850189209,
"avg_line_length": 8.809523582458496,
"blob_id": "cec36ce7f31b366a4b19eedfa4c18e5f650b7556",
"content_id": "99de4f08a86fd8f56c9b17376a5bf702f11c0f90",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Markdown",
"length_bytes": 206,
"license_type": "no_license",
"max_line_length": 70,
"num_lines": 21,
"path": "/docs/commands/ps.md",
"repo_name": "orix-software/orix-software.github.io",
"src_encoding": "UTF-8",
"text": "# ps\n\n## Introduction\n\nDisplays process list\n\n## SYNOPSYS\n\n+ #ps\n\n## DESCRIPTION\n\nDisplays process list\n\n## EXAMPLES\n\n+ ps\n\n## SOURCE\n\nhttps://github.com/orix-software/shell/blob/master/src/commands/ps.asm\n"
},
{
"alpha_fraction": 0.6626728177070618,
"alphanum_fraction": 0.704147458076477,
"avg_line_length": 25.125,
"blob_id": "5021da87193c698912f7df5ecd38d1b9e1ff83be",
"content_id": "3e5b12ef49f3ec99be5f9dd173ab05cc4f6f3a5b",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Markdown",
"length_bytes": 1085,
"license_type": "no_license",
"max_line_length": 127,
"num_lines": 40,
"path": "/docs/update/2023_1.md",
"repo_name": "orix-software/orix-software.github.io",
"src_encoding": "UTF-8",
"text": "# 2023.1\r\n\r\n## Kernel\r\n\r\n* add ftell in XVALUES primitive\r\n* add getProcessName from an id malloc entry\r\n* get table copy of free malloc table (from XVALUES)\r\n* get table copy of busy malloc table (from XVALUES)\r\n* when MAX_PROCESS is reached, kernel displays a kernel panic\r\n* fix bug when wronly us set only\r\n* fix two bugs for dynamic relocation format (2)\r\n\r\n## Shell\r\n\r\n* lsmem is recoded from scratch\r\n* /etc/autoboot is checked at the start of shell in order to boot any submit script (autoboot is a .sub file without extension)\r\n* pstree added\r\n* It's now possible to add \"#\" on command line to specify a comment\r\n\r\n## Install\r\n\r\nDownload : https://repo.orix.oric.org/dists/official/tgz/6502/cardridge.tgz\r\n\r\nuntar/gunzip on the twilighte board device.\r\n\r\n### orixcfg (below 2023.2 orixcfg version)\r\n\r\nUnder Orix\r\n\r\n```bash\r\n/# cd /usr/share/carts/2023.1/\r\n/usr/share/carts/2023.1# orixcfg -r -s 4 kernelus.r64\r\n```\r\n\r\n## orixcfg (equal or greater than 2023.2 orixcfg version)\r\n\r\n```bash\r\n/# cd /usr/share/carts/2023.1/\r\n/usr/share/carts/2023.1# orixcfg -k kernelus.r64\r\n```\r\n"
},
{
"alpha_fraction": 0.5778210163116455,
"alphanum_fraction": 0.5914396643638611,
"avg_line_length": 15.724138259887695,
"blob_id": "07d024b29ab1fbd84c7235a6a9283d9a3cf5afce",
"content_id": "2d459ff4a22e5586ac4d336f41dbc464a0eb2dd4",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Markdown",
"length_bytes": 514,
"license_type": "no_license",
"max_line_length": 72,
"num_lines": 29,
"path": "/docs/developer_manual/orixsdk_macros/fwrite.md",
"repo_name": "orix-software/orix-software.github.io",
"src_encoding": "UTF-8",
"text": "# FWRITE macro\r\n\r\n## Description\r\n\r\nWrite bytes into file\r\n\r\n## Usage\r\n\r\nfwrite ptr, size, count, fp\r\n\r\nnote:\r\n\r\n* ptr may be : (ptr), address\r\n* size may be: (ptr), address\r\n* fp may be : address, #value, {address,y}\r\n\r\n## Example\r\n\r\n```ca65\r\n .include \"telestrat.inc\"\r\n .include \"../orix-sdk/macros/SDK_file.mac\"\r\n\r\n [...]\r\n fwrite (myptr), 1080, 1, MAN_FP ; myptr is from a malloc for example\r\n fclose(MAN_FP)\r\n rts\r\n```\r\n\r\nSee [XFWRITE](../../../kernel/primitives/xfwrite) kernel primitive\r\n"
},
{
"alpha_fraction": 0.5491143465042114,
"alphanum_fraction": 0.5772947072982788,
"avg_line_length": 17.264705657958984,
"blob_id": "d6d1bc0e638db2d81e21f3e712d4f9d98bff6260",
"content_id": "e9fe9e5648f48ddc0f4db719091729816be39ece",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Markdown",
"length_bytes": 1242,
"license_type": "no_license",
"max_line_length": 141,
"num_lines": 68,
"path": "/doxygen/doxybook_output/Files/vi__search__previous__line__beginning_8s.md",
"repo_name": "orix-software/orix-software.github.io",
"src_encoding": "UTF-8",
"text": "---\ntitle: /mnt/c/Users/plifp/OneDrive/oric/projets/orix-software/vi/src/functions/subfunc/vi/vi_search_previous_line_beginning.s\n\n---\n\n# /mnt/c/Users/plifp/OneDrive/oric/projets/orix-software/vi/src/functions/subfunc/vi/vi_search_previous_line_beginning.s\n\n\n\n## Functions\n\n| | Name |\n| -------------- | -------------- |\n| | **[vi_search_previous_line_beginning](Files/vi__search__previous__line__beginning_8s.md#function-vi-search-previous-line-beginning)**() |\n\n\n## Functions Documentation\n\n### function vi_search_previous_line_beginning\n\n```cpp\nvi_search_previous_line_beginning()\n```\n\n\n\n\n## Source code\n\n```cpp\n.proc vi_search_previous_line_beginning\n ; A = 1 found $0D\n ; A = 0 find beginning\n ; X contains the number of char parsed\n ldx #$00\n\n@L1:\n jsr vi_ptr_file_used_sub_sub\n cmp #IS_BEGINNING_OF_THE_FILE\n beq @beginning_of_file\n\n ldy #$00\n lda (vi_ptr_file_used),y\n cmp #CR\n beq @found_0D\n cmp #LF\n beq @dex\n inx\n bne @L1\n\n@dex:\n jmp @L1\n\n@found_0D:\n lda #$01\n rts\n\n@beginning_of_file:\n lda #$00\n rts\n\n.endproc\n```\n\n\n-------------------------------\n\nUpdated on 2022-12-15 at 11:48:27 +0100\n"
},
{
"alpha_fraction": 0.4883268475532532,
"alphanum_fraction": 0.5194552540779114,
"avg_line_length": 15.0625,
"blob_id": "89b85cdbd38bc6963ef1a5f6d50392dfe52bd717",
"content_id": "a319e6d4b4cd59366d3dd16c30bcba604bb9fcba",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Markdown",
"length_bytes": 2056,
"license_type": "no_license",
"max_line_length": 117,
"num_lines": 128,
"path": "/doxygen/doxybook_output/Files/vi__compute__last__text__line_8s.md",
"repo_name": "orix-software/orix-software.github.io",
"src_encoding": "UTF-8",
"text": "---\ntitle: /mnt/c/Users/plifp/OneDrive/oric/projets/orix-software/vi/src/functions/subfunc/vi/vi_compute_last_text_line.s\n\n---\n\n# /mnt/c/Users/plifp/OneDrive/oric/projets/orix-software/vi/src/functions/subfunc/vi/vi_compute_last_text_line.s\n\n\n\n## Functions\n\n| | Name |\n| -------------- | -------------- |\n| | **[vi_compute_last_text_line](Files/vi__compute__last__text__line_8s.md#function-vi-compute-last-text-line)**() |\n\n\n## Functions Documentation\n\n### function vi_compute_last_text_line\n\n```cpp\nvi_compute_last_text_line()\n```\n\n\n\n\n## Source code\n\n```cpp\n.proc vi_compute_last_text_line\n ; A & Y the current ptr\n ; X the id of the text line ?\n sta vi_ptr2\n sty vi_ptr2+1\n\n stx vi_tmp2\n\n lda #VI_LAST_LINE_EDITOR\n sec\n sbc vi_tmp2\n sta vi_tmp2\n\n\n@L1:\n\n\n jsr check_eof_vi_ptr2\n cmp #IS_EOF\n beq @exit\n ldy #$00\n lda (vi_ptr2),y\n cmp #CR\n beq @found\n\n@continue:\n inc vi_ptr2\n bne @out_compare\n inc vi_ptr2+1\n@out_compare:\n jmp @L1\n\n@found:\n\n inc vi_ptr2\n bne @out_compare2\n inc vi_ptr2+1\n@out_compare2:\n\n jsr check_eof_vi_ptr2\n cmp #IS_EOF\n beq @exit\n\n lda (vi_ptr2),y\n cmp #LF\n bne @S1\n\n inc vi_ptr2\n bne @out_compare3\n inc vi_ptr2+1\n@out_compare3:\n\n jsr check_eof_vi_ptr2\n cmp #IS_EOF\n beq @exit\n\n\n@S1:\n dec vi_tmp2\n bne @continue\n@exit:\n lda vi_ptr2\n ldy vi_ptr2+1\n\n rts\n\n\ncheck_eof_vi_ptr2:\n ldy #vi_struct_data::ptr_last_char_file\n lda (vi_struct),y\n sta vi_tmp1\n\n lda vi_ptr2\n cmp vi_tmp1\n bne @not_eof\n\n ldy #vi_struct_data::ptr_last_char_file+1\n lda (vi_struct),y\n sta vi_tmp1\n\n lda vi_ptr2+1\n cmp vi_tmp1\n bne @not_eof\n\n lda #IS_EOF\n rts\n\n@not_eof:\n lda #$01\n rts\n\n.endproc\n```\n\n\n-------------------------------\n\nUpdated on 2022-12-15 at 11:48:27 +0100\n"
},
{
"alpha_fraction": 0.6742857098579407,
"alphanum_fraction": 0.6857143044471741,
"avg_line_length": 22.714284896850586,
"blob_id": "aa3d5f9588bccae3931260c53b57a7cfb81939e2",
"content_id": "e92a84b09cbe34b36506d59aa3fc8ef4f6890dc7",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Markdown",
"length_bytes": 175,
"license_type": "no_license",
"max_line_length": 42,
"num_lines": 7,
"path": "/docs/hardware/disk_controler_and_twilighte_board.md",
"repo_name": "orix-software/orix-software.github.io",
"src_encoding": "UTF-8",
"text": "# Disk controler and twilighte board\r\n\r\n\r\n\r\n\r\n\r\n\r\n\r\n"
},
{
"alpha_fraction": 0.4863876402378082,
"alphanum_fraction": 0.5002031922340393,
"avg_line_length": 28.382715225219727,
"blob_id": "b3aa1ca591597bfa1c2c68416e20bb65c67f0fa8",
"content_id": "7eb7532c4f420a7b49f87f5ad85a40aef76cfeef",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Markdown",
"length_bytes": 2461,
"license_type": "no_license",
"max_line_length": 105,
"num_lines": 81,
"path": "/docs/kernel/primitives/xopen.md",
"repo_name": "orix-software/orix-software.github.io",
"src_encoding": "UTF-8",
"text": "# XOPEN\r\n\r\n## Description\r\n\r\nopen a file\r\n\r\n## Input\r\n\r\n* Y : flag open mode\r\n* A&X : path filename\r\n\r\n## Output\r\n\r\n* AX contains $FF if it does not exists\r\n* A : FD id if file is found\r\n\r\n## Modify\r\n\r\n* RES, A X Y, XOPEN_SAVE XOPEN_FLAGS, XOPEN_RES_SAVE, XOPEN_SAVEA\r\n* with XMALLOC call TR7\r\n\r\n## Internal kernel calls\r\n\r\n* XMALLOC\r\n* create_file_pointer\r\n\r\n## Changelog\r\n\r\n### Since kernel 2022.2\r\n\r\nFlag | File exists | behaviour\r\nO_WRONLY | No | return Null\r\nO_WRONLY | Yes | open and return FD\r\nO_RDONLY | Yes | open and return FD\r\nO_WRONLY | No | return Null\r\nO_CREAT | No | Create file and open and return FD\r\nO_CREAT | Yes | open and return FD\r\n\r\n## Usage\r\n\r\n``` ca65\r\n lda #<str\r\n ldx #>str\r\n ldy #O_CREAT\r\n\r\n BRK_TELEMON XOPEN\r\n rts\r\nstr:\r\n .asciiz \"myfile\"\r\n```\r\n\r\n!!! tip \"See [fopen](../../../developer_manual/orixsdk_macros/fopen) macro from orix-sdk to use it\"\r\n\r\n!!! fail \"XOPEN does not manage './' or '../' calls\"\r\n\r\n!!! warning \"Multiples opened files are working since kernel 2022.2\"\r\n\r\n!!! warning \"Max 2 opened files are working since kernel 2022.2, it's a constant defined in kernel build\"\r\n\r\n??? info \"Flags behaviors since kernel 2023.1\"\r\n | Flag | File exists |Behaviour|\r\n | ----------- | ------------------------------------ ||\r\n | O_WRONLY & O_CREAT | No | Create file, open and return FD\r\n | O_WRONLY | No | return Null (Changed since kernel 2023.1)\r\n | O_WRONLY | Yes | open and return FD\r\n | O_RDONLY | Yes | open and return FD\r\n | O_WRONLY | No | return Null\r\n | O_CREAT | No | Create file and open and return FD\r\n | O_CREAT | Yes | open and return FD\r\n\r\n??? info \"Flags behaviors before kernel 2023.1\"\r\n\r\n | Flag | File exists |Behaviour|\r\n | ----------- | ------------------------------------ ||\r\n |O_WRONLY & O_CREAT | No | Create file, open and return FD\r\n |O_WRONLY | No | open and return FD\r\n |O_WRONLY | Yes | open and return FD\r\n |O_RDONLY | Yes | open and return FD\r\n |O_WRONLY | No | return Null\r\n |O_CREAT | No | Create file and open and return FD\r\n |O_CREAT | Yes | open and return FD\r\n"
},
{
"alpha_fraction": 0.6626016497612,
"alphanum_fraction": 0.7154471278190613,
"avg_line_length": 10.714285850524902,
"blob_id": "af179f7e0bf6baa68a8e3817ad6cf3934bf17d3f",
"content_id": "31624b885adf8c384a2ce020c152efd30763175d",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Markdown",
"length_bytes": 246,
"license_type": "no_license",
"max_line_length": 73,
"num_lines": 21,
"path": "/docs/commands/lscpu.md",
"repo_name": "orix-software/orix-software.github.io",
"src_encoding": "UTF-8",
"text": "# lscpu\n\n## Introduction\n\nDisplays cpu info. It detects : 6502, 65c02 and 65816\n\n## SYNOPSYS\n\n+ #lscpu\n\n## DESCRIPTION\n\nDisplays cpu info\n\n## EXAMPLES\n\n+ lscpu\n\n## SOURCE\n\nhttps://github.com/orix-software/shell/blob/master/src/commands/lscpu.asm\n"
},
{
"alpha_fraction": 0.5641677379608154,
"alphanum_fraction": 0.5838627815246582,
"avg_line_length": 15.354166984558105,
"blob_id": "db211abee64fcde3dc3654779671b3389590bf72",
"content_id": "2e128dbe8d670348f0128ea5d0750e6a466cc757",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Markdown",
"length_bytes": 1574,
"license_type": "no_license",
"max_line_length": 54,
"num_lines": 96,
"path": "/kernel/docs/index.md",
"repo_name": "orix-software/orix-software.github.io",
"src_encoding": "UTF-8",
"text": "# General information\n\n# Install environment development for developers\n\n* [Getting-started](getting-started)\n\n\n## Kernel primitives\n\n* [Primitives](primitives)\n* [Tools](mount)\n\n* Doxygen for [kernel](doxygen/kernel).\n\n## Samples for kernel calls\n\n### Get ctrl+c hook\n\n``` ca65\n .include \"telestrat.inc\"\n asl KBDCTC\n bcc @no_ctrl\n ; here is ctrl management\n@no_ctrl:\n rts\n\n```\n\n### Test kernel Error when fopen failed\n\n``` ca65\n ; Open\n ... \n BRK_KERNEL XOPEN \n cpy #$00\n bne @read_rom \n cmp #$00\n bne @read_rom \n\n ldx #$04 ; Get kernel ERRNO\n BRK_KERNEL XVARS\n sta userzp\n sty userzp+1\n\n ldy #$00\n lda (userzp),y ; get ERRNO from kernel\n cmp #ENOMEM\n bne @no_enomem_kernel_error\n PRINT str_enomem\n\n@no_enomem_kernel_error:\n cmp #ENOENT\n bne @no_enoent_kernel_error\n PRINT str_not_found\n```\n\n\n### Stop output when spacebar is pressed (for example)\n\n``` ca65\n .include \"telestrat.inc\"\n@L12:\n BRK_KERNEL XRD0\n bcs @no_char_action\n cmp #' ' ; Space pressed ?\n bne @no_char ; no continue\n\n lda myOffsetToManageSwitch ; One byte\n beq @inv_to_1\n\n lda #$00\n sta myOffsetToManageSwitch\n jmp @L12\n\n@inv_to_1:\n inc myOffsetToManageSwitch\n jmp @L12\n\n@no_char_action:\n lda myOffsetToManageSwitch\n beq @L12\n\n@no_char:\n ; No keypressed\n```\n\n### Displays a string\n\n``` ca65\n lda #<mystr\n ldy #>mystr\n BRK_KERNEL XWRSTR0\n rts\n mysstr:\n .asciiz \"hello\" \n``` \n\n"
},
{
"alpha_fraction": 0.5956738591194153,
"alphanum_fraction": 0.6006655693054199,
"avg_line_length": 36.5,
"blob_id": "48fc2bf2b53dc632eb01c5c7c24986a7fd7f9c09",
"content_id": "6405645cae865f291fafe40c7a2a02d4424b1aaa",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Shell",
"length_bytes": 601,
"license_type": "no_license",
"max_line_length": 98,
"num_lines": 16,
"path": "/doxygen/launch.sh",
"repo_name": "orix-software/orix-software.github.io",
"src_encoding": "UTF-8",
"text": "doxygen Doxyfile_vi\n~/bin/doxybook2 --input doc/xml/ --output doxybook_output_vi --config config.json\nfor f in $(find doxybook_output_vi/ | grep md)\ndo\n echo $f\n sed -i -e 's/cpp/ca65/g' \"$f\"\n sed -i -e 's/Functions Documentation/Routine documentation/g' \"$f\"\n sed -i -e 's/function/Routine/g' \"$f\"\n sed -i -e 's/Functions/Routine/g' \"$f\"\n sed -i -e 's/()//g' \"$f\"\n sed -i -e 's/\\/mnt\\/c\\/Users\\/plifp\\/OneDrive\\/oric\\/projets\\/orix-software\\/vi\\/src//g' \"$f\"\n sed -i -e 's/\\/Routines\\/subfunc\\/vi\\///g' \"$f\"\n\ndone\n\ncp doxybook_output_vi/* ../docs/tools_docs/vi -adpR\n\n"
},
{
"alpha_fraction": 0.5037453174591064,
"alphanum_fraction": 0.5402621626853943,
"avg_line_length": 14.70588207244873,
"blob_id": "aea2c3b7cc81fbec0edc1bd70455fb00f52d7d9c",
"content_id": "e9d7a11afb30bad2fe8159e3f2e80c9eacd24fb4",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Markdown",
"length_bytes": 1068,
"license_type": "no_license",
"max_line_length": 138,
"num_lines": 68,
"path": "/doxygen/doxybook_output_vi/Files/vi__search__previous__line__beginning_8s.md",
"repo_name": "orix-software/orix-software.github.io",
"src_encoding": "UTF-8",
"text": "---\ntitle: vi_search_previous_line_beginning.s\n\n---\n\n# vi_search_previous_line_beginning.s\n\n\n\n## Routine\n\n| | Name |\n| -------------- | -------------- |\n| | **[vi_search_previous_line_beginning](Files/vi__search__previous__line__beginning_8s.md#Routine-vi-search-previous-line-beginning)** |\n\n\n## Routine documentation\n\n### Routine vi_search_previous_line_beginning\n\n```ca65\nvi_search_previous_line_beginning\n```\n\n\n\n\n## Source code\n\n```ca65\n.proc vi_search_previous_line_beginning\n ; A = 1 found $0D\n ; A = 0 find beginning\n ; X contains the number of char parsed\n ldx #$00\n\n@L1:\n jsr vi_ptr_file_used_sub_sub\n cmp #IS_BEGINNING_OF_THE_FILE\n beq @beginning_of_file\n\n ldy #$00\n lda (vi_ptr_file_used),y\n cmp #CR\n beq @found_0D\n cmp #LF\n beq @dex\n inx\n bne @L1\n\n@dex:\n jmp @L1\n\n@found_0D:\n lda #$01\n rts\n\n@beginning_of_file:\n lda #$00\n rts\n\n.endproc\n```\n\n\n-------------------------------\n\nUpdated on 2022-12-15 at 14:20:17 +0100\n"
},
{
"alpha_fraction": 0.5592592358589172,
"alphanum_fraction": 0.5666666626930237,
"avg_line_length": 10.272727012634277,
"blob_id": "3b972f9e52695f7fdedd384dcb2f0e28dfa5c56d",
"content_id": "7e9b25eab54df73c1f4136d23316f3d4b658e43a",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Markdown",
"length_bytes": 270,
"license_type": "no_license",
"max_line_length": 64,
"num_lines": 22,
"path": "/docs/developer_manual/orixsdk_macros/ping.md",
"repo_name": "orix-software/orix-software.github.io",
"src_encoding": "UTF-8",
"text": "# Ping\r\n\r\n## Description\r\n\r\nProduce a ping sound\r\n\r\n## Usage\r\n\r\nping\r\n\r\n## Example\r\n\r\n```ca65\r\n .include \"telestrat.inc\"\r\n .include \"../orix-sdk/macros/SDK_sound.mac\"\r\n\r\n ping\r\n rts\r\n\r\n```\r\n\r\nCall [XPING](../../../kernel/primitives/xping/) kernel function.\r\n"
},
{
"alpha_fraction": 0.4169696867465973,
"alphanum_fraction": 0.6745454668998718,
"avg_line_length": 85.84210205078125,
"blob_id": "585c7d94166a305ce758fc3871bc52e54fcfbb8b",
"content_id": "367c85c0f9c9091677797ebdad405412f1918f6b",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "JavaScript",
"length_bytes": 1650,
"license_type": "no_license",
"max_line_length": 127,
"num_lines": 19,
"path": "/doxygen/doc/html/search/variables_2.js",
"repo_name": "orix-software/orix-software.github.io",
"src_encoding": "UTF-8",
"text": "var searchData=\n[\n ['vi_5farg1_156',['vi_arg1',['../vi_8s.html#abe3cbd9bfcb577ef36a5a222f61203f5',1,'vi.s']]],\n ['vi_5fargc_157',['vi_argc',['../vi_8s.html#aae53a4d8f7ef38ac152a0350423b8f1c',1,'vi.s']]],\n ['vi_5fargv_158',['vi_argv',['../vi_8s.html#a62f7795ee167060eb27b1e3db7d7d23c',1,'vi.s']]],\n ['vi_5ffp_159',['vi_fp',['../vi_8s.html#a5a9929a47b0ba3db03915e19b3b68cd6',1,'vi.s']]],\n ['vi_5flast_5fline_5feditor_160',['VI_LAST_LINE_EDITOR',['../vi_8s.html#ac2df40bfbdee63703d0497bd65ad73d5',1,'vi.s']]],\n ['vi_5flength_5ffile_161',['vi_length_file',['../vi_8s.html#a4ce74bdf77badacd182d1181a0e2b77a',1,'vi.s']]],\n ['vi_5flength_5ffile_5fcompute_162',['vi_length_file_compute',['../vi_8s.html#a57ddc9c9afcd4940ce8a258c8a99671b',1,'vi.s']]],\n ['vi_5fptr1_163',['vi_ptr1',['../vi_8s.html#aadda8db3383bdab25ec1683d8dd0f6eb',1,'vi.s']]],\n ['vi_5fptr2_164',['vi_ptr2',['../vi_8s.html#a450181cfcccedf7f533d5259df4d2eeb',1,'vi.s']]],\n ['vi_5fptr3_165',['vi_ptr3',['../vi_8s.html#a1c69f491e2054203373226bba5c59c7f',1,'vi.s']]],\n ['vi_5fptr_5ffile_166',['vi_ptr_file',['../vi_8s.html#a46b0a41d1f654c3f19989f9094001198',1,'vi.s']]],\n ['vi_5fptr_5ffile_5fused_167',['vi_ptr_file_used',['../vi_8s.html#affe3a94e7eb08fe3f01266521eb1bd6f',1,'vi.s']]],\n ['vi_5fptr_5fscreen_168',['vi_ptr_screen',['../vi_8s.html#ae75ff061f510abe8dcbcdc8434e5d3b9',1,'vi.s']]],\n ['vi_5fsavex_169',['vi_savex',['../vi_8s.html#a683876721ab370879aa1095e1b11444a',1,'vi.s']]],\n ['vi_5fstruct_170',['vi_struct',['../vi_8s.html#af804024f92f05e75a9f3a16f7e65873e',1,'vi.s']]],\n ['vi_5ftmp3_171',['vi_tmp3',['../vi_8s.html#a530efc8d92452198964b9568561138ae',1,'vi.s']]]\n];\n"
},
{
"alpha_fraction": 0.6205787658691406,
"alphanum_fraction": 0.6334404945373535,
"avg_line_length": 9,
"blob_id": "e31be51e5015d9741c5daa8b30070b5357ad102d",
"content_id": "bacb11b033fa684ac1bc9b7b979b9fee8e85650e",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Markdown",
"length_bytes": 311,
"license_type": "no_license",
"max_line_length": 38,
"num_lines": 28,
"path": "/kernel/docs/primitives/xmainargs.md",
"repo_name": "orix-software/orix-software.github.io",
"src_encoding": "UTF-8",
"text": "# XMAINARGS (argmains)\r\n\r\nID primitive : $2C\r\n\r\n## Description\r\n\r\nReturn argc and argv\r\n\r\n## Input\r\n\r\nNothing\r\n\r\n##Output\r\n\r\nA & Y contains ptr to XMAINARGS Struct\r\nX: number of args\r\n\r\n## Usage\r\n\r\n``` ca65\r\nXMAINARGS = $2C\r\n\r\nbrk_kernel XMAINARGS\r\n\r\nstx save_argc\r\nsta save_argvlow\r\nsty save_argvhigh\r\n``` \r\n\r\n"
},
{
"alpha_fraction": 0.5366336703300476,
"alphanum_fraction": 0.5485148429870605,
"avg_line_length": 11.948718070983887,
"blob_id": "3398c47b29e09bca3c9924d6e4162733a9bd120a",
"content_id": "b09f2394b97af89dbde19c288ed6ae4e2ddb2dc0",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Markdown",
"length_bytes": 505,
"license_type": "no_license",
"max_line_length": 92,
"num_lines": 39,
"path": "/docs/kernel/primitives/xopendir.md",
"repo_name": "orix-software/orix-software.github.io",
"src_encoding": "UTF-8",
"text": "# XOPENDIR\n\n## Description\n\nOpen a folder\n\n## Input\n\nA & X : string\n\n## Output\n\nA : fd\n\n## Example 1 : Openfolder /bin\n\n```ca65\n .include \"telestrat.inc\"\n\n ldx #$00 ; XOPENDIR Mode\n lda #<opendir\n ldy #>opendir\n BRK_TELEMON $2F\n cmp #$FF\n bne @ok\n cpx #$FF\n bne @ok\n\n print str_error\n\n rts\n@ok:\n ; Save ptr\n rts\nopendir:\n asciiz \"/bin\"\n```\n\n!!! fail \"XOPENDIR is in beta mode and close current open file : it brokes any fopen files.\"\n"
},
{
"alpha_fraction": 0.46054333448410034,
"alphanum_fraction": 0.4954721927642822,
"avg_line_length": 13.584905624389648,
"blob_id": "380f4026de94ffcd679f7547c9bff59bdd06dbe9",
"content_id": "2082255541f797cc3830a87967974fc8b2e18964",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Markdown",
"length_bytes": 773,
"license_type": "no_license",
"max_line_length": 112,
"num_lines": 53,
"path": "/docs/tools_docs/vi/Files/vi__ptr__last__char__sub__sub_8s.md",
"repo_name": "orix-software/orix-software.github.io",
"src_encoding": "UTF-8",
"text": "---\ntitle: vi_ptr_last_char_sub_sub.s\n\n---\n\n# vi_ptr_last_char_sub_sub.s\n\n\n\n## Routine\n\n| | Name |\n| -------------- | -------------- |\n| | **[vi_ptr_last_char_sub_sub](Files/vi__ptr__last__char__sub__sub_8s.md#Routine-vi-ptr-last-char-sub-sub)** |\n\n\n## Routine documentation\n\n### Routine vi_ptr_last_char_sub_sub\n\n```ca65\nvi_ptr_last_char_sub_sub\n```\n\n\n\n\n## Source code\n\n```ca65\n.proc vi_ptr_last_char_sub_sub\n ldy #vi_struct_data::ptr_last_char_file\n iny\n lda (vi_struct),y\n bne @dec\n sec\n sbc #$01\n sta (vi_struct),y\n\n@dec:\n dey\n lda (vi_struct),y\n sec\n sbc #$01\n sta (vi_struct),y\n rts\n.endproc\n```\n\n\n-------------------------------\n\nUpdated on 2022-12-15 at 14:20:17 +0100\n"
},
{
"alpha_fraction": 0.5904908180236816,
"alphanum_fraction": 0.6211656332015991,
"avg_line_length": 16.11111068725586,
"blob_id": "f6299de6a2b562bb5a857e8662dc8514405bd8c7",
"content_id": "574a3b91b5964c80fe6d2ebf1081490c8a0a0149",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Markdown",
"length_bytes": 652,
"license_type": "no_license",
"max_line_length": 105,
"num_lines": 36,
"path": "/docs/kernel/primitives/xwrite.md",
"repo_name": "orix-software/orix-software.github.io",
"src_encoding": "UTF-8",
"text": "# XFWRITE\r\n\r\n## Description\r\n\r\nWrite bytes to file\r\n\r\n## Input\r\n\r\n* AY contains the length to write\r\n* PTR_READ_DEST must be set (ptr where data are read)\r\n* X contains the fd id\r\n\r\n## Output\r\n\r\nA & X contains the written length\r\n\r\n## Modification\r\n\r\n## Example\r\n\r\n```ca65\r\n lda #<$A000\r\n sta PTR_READ_DEST\r\n lda #>$A000\r\n sta PTR_READ_DEST+1\r\n\r\n lda #<12 ; Write 12 bytes\r\n ldy #>12\r\n ldx fp\r\n BRK_KERNEL XFWRITE\r\n rts\r\n```\r\n\r\n!!! tip \"See [fwrite](../../developer_manual/orixsdk_macros/fwrite) macro from orix-sdk to use it easily\"\r\n\r\n!!! fail \"XFWRITE does not manage multiples opened files : except since kernel 2022.2\"\r\n"
},
{
"alpha_fraction": 0.4443591833114624,
"alphanum_fraction": 0.47966232895851135,
"avg_line_length": 14.151163101196289,
"blob_id": "11dc3ac31fbdb637ce2fe91b45df0cb9d41c9762",
"content_id": "6492b3a7acd2da8f000bc1f1a5c1964e32359d0c",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Markdown",
"length_bytes": 1303,
"license_type": "no_license",
"max_line_length": 73,
"num_lines": 86,
"path": "/docs/tools_docs/vi/Files/vi__key__right_8s.md",
"repo_name": "orix-software/orix-software.github.io",
"src_encoding": "UTF-8",
"text": "---\ntitle: vi_key_right.s\n\n---\n\n# vi_key_right.s\n\n\n\n## Routine\n\n| | Name |\n| -------------- | -------------- |\n| | **[vi_key_right](Files/vi__key__right_8s.md#Routine-vi-key-right)** |\n\n\n## Routine documentation\n\n### Routine vi_key_right\n\n```ca65\nvi_key_right\n```\n\n\n\n\n## Source code\n\n```ca65\n.proc vi_key_right\n\n ; are we on the end of the text file ?\n\n ; Compute if we need to erase ~\n jsr vi_check_eof\n cmp #IS_EOF\n bne @not_eof\n rts\n\n@not_eof:\n jsr vi_editor_switch_off_cursor\n ldy #$00\n lda (vi_ptr_file_used),y\n cmp #CR\n beq @no_add_x\n cmp #LF\n beq @no_add_x\n\n jsr vi_ptr_file_used_plus_plus\n jsr vi_xpos_screen_plus_plus\n cmp #$01\n bne @no_add_x\n\n jsr vi_set_xpos_0\n jsr vi_ypos_screen_plus_plus\n cmp #$01\n bne @no_add_x\n\n scroll up, 0, 26 ; Yes scroll\n\n lda vi_ptr_file_used\n sta vi_ptr1\n\n lda vi_ptr_file_used+1\n sta vi_ptr1+1\n\n lda vi_ptr1\n bne @S1\n dec vi_ptr1+1\n@S1:\n dec vi_ptr1\n\n\n lda vi_ptr1\n ldy vi_ptr1+1\n jsr vi_fill_last_line\n@no_add_x:\n rts\n.endproc\n```\n\n\n-------------------------------\n\nUpdated on 2022-12-15 at 14:20:17 +0100\n"
},
{
"alpha_fraction": 0.7299205660820007,
"alphanum_fraction": 0.739629328250885,
"avg_line_length": 26.634145736694336,
"blob_id": "5a3bad7496d34453625db2177824a152d7d64a4a",
"content_id": "0c13f114fac2bd9643cc0fcedb4be0b96a8d74c7",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Markdown",
"length_bytes": 1135,
"license_type": "no_license",
"max_line_length": 154,
"num_lines": 41,
"path": "/docs/commands/bank.md",
"repo_name": "orix-software/orix-software.github.io",
"src_encoding": "UTF-8",
"text": "# bank\n\n## Introduction\n\nBank command is command line tool to see which bank are loaded into EEPROM bank and RAM bank. Each bank has a\n\"signature\". Bank allows to see theses banks.\nBank can also starts a ROM with his id. In that case, you don’t need to have a rom \"orix friendly\" and you can start it\nfrom command line. In the current bank version, there is restriction to launch a command.\n\n## SYNOPSYS\n\n### List all bank (when ROM signature is valid)\n\n/#bank\nBank 1 to 32 is eeprom bank and bank 33 to 64 are ram bank\n\n### Displays all signature even when ROM is not valid\n\n/#bank\n\n### List all commands from a bank\n\n/#help -b5\n\n### Start a specific bank\n\n/#bank 1\n\nIf you need to load a rom into a bank, you need to have a look to orixcfg binar\n\n## DESCRIPTION\n\nThis command displays bank when the command is called without parameter. WIth a parameter, you can switch to a the id of the bank passed to the argument :\n\nbank : displays all the bank (if a signature is found)\nbank 4 : switch to bank 4\nbank -a : displauys all bank (empty bank too)\n\n## SOURCE\n\nhttps://github.com/orix-software/shell/blob/master/src/commands/bank.asm\n"
},
{
"alpha_fraction": 0.40190476179122925,
"alphanum_fraction": 0.4647619128227234,
"avg_line_length": 11.5,
"blob_id": "fc6ed8d810965249f68ebe883647ad51f70e72cb",
"content_id": "f8154389d2d12dc1cda0093e0ff800a9aeef573d",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Markdown",
"length_bytes": 525,
"license_type": "no_license",
"max_line_length": 77,
"num_lines": 42,
"path": "/doxygen/doxybook_output_vi/Files/vi__set__xpos__0_8s.md",
"repo_name": "orix-software/orix-software.github.io",
"src_encoding": "UTF-8",
"text": "---\ntitle: vi_set_xpos_0.s\n\n---\n\n# vi_set_xpos_0.s\n\n\n\n## Routine\n\n| | Name |\n| -------------- | -------------- |\n| | **[vi_set_xpos_0](Files/vi__set__xpos__0_8s.md#Routine-vi-set-xpos-0)** |\n\n\n## Routine documentation\n\n### Routine vi_set_xpos_0\n\n```ca65\nvi_set_xpos_0\n```\n\n\n\n\n## Source code\n\n```ca65\n.proc vi_set_xpos_0\n ldy #vi_struct_data::xpos_screen\n lda #$00\n sta (vi_struct),y\n rts\n.endproc\n```\n\n\n-------------------------------\n\nUpdated on 2022-12-15 at 14:20:17 +0100\n"
},
{
"alpha_fraction": 0.5975610017776489,
"alphanum_fraction": 0.6646341681480408,
"avg_line_length": 16,
"blob_id": "a48acb8ae4e40294ac5fc78a54defc76ed213cd3",
"content_id": "6f0d5b1db8b7a5a6dc4b3d132d17965f9fce6d06",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Markdown",
"length_bytes": 164,
"license_type": "no_license",
"max_line_length": 42,
"num_lines": 9,
"path": "/docs/update/file/file.md",
"repo_name": "orix-software/orix-software.github.io",
"src_encoding": "UTF-8",
"text": "# file\r\n\r\n## 2022.4\r\n\r\n* Built in order to manage fd file open\r\n* built in relocation binary\r\n* displays more informations from format 2\r\n\r\n\r\n\r\n"
},
{
"alpha_fraction": 0.5104625821113586,
"alphanum_fraction": 0.5462555289268494,
"avg_line_length": 16.461538314819336,
"blob_id": "21e09aaf3b66c7c3d02d2a1ecb9296c812f8627f",
"content_id": "4c00af871f44bcd96c323a4fbc11dc8b38b03763",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Markdown",
"length_bytes": 1822,
"license_type": "no_license",
"max_line_length": 113,
"num_lines": 104,
"path": "/doxygen/doxybook_output/Files/vi__search__previous__cr_8s.md",
"repo_name": "orix-software/orix-software.github.io",
"src_encoding": "UTF-8",
"text": "---\ntitle: /mnt/c/Users/plifp/OneDrive/oric/projets/orix-software/vi/src/functions/subfunc/vi/vi_search_previous_cr.s\n\n---\n\n# /mnt/c/Users/plifp/OneDrive/oric/projets/orix-software/vi/src/functions/subfunc/vi/vi_search_previous_cr.s\n\n\n\n## Functions\n\n| | Name |\n| -------------- | -------------- |\n| | **[vi_search_previous_cr](Files/vi__search__previous__cr_8s.md#function-vi-search-previous-cr)**() |\n\n\n## Functions Documentation\n\n### function vi_search_previous_cr\n\n```cpp\nvi_search_previous_cr()\n```\n\n\n\n\n## Source code\n\n```cpp\n.proc vi_search_previous_cr\n ; Cherche le précédent \\n\n ; retourne dans A et Y la position en ptr\n ; Si on est en début de fichier A et Y = $0000\n ; retourne dans X le nombre de char trouvés entre le $0D et le 1er char de la ligne précédnte ligne\n\n sta vi_ptr1\n sty vi_ptr1+1\n\n ldx #$00\n@L1:\n ldy #$00\n lda (vi_ptr1),y\n cmp #CR\n beq @exit\n cmp #LF\n beq @no_inc\n\n inx\n\n@no_inc:\n lda vi_ptr1\n bne @S1\n dec vi_ptr1+1\n@S1:\n dec vi_ptr1\n\n lda vi_ptr1\n cmp vi_ptr_file\n bne @not_beginning\n\n lda vi_ptr1+1\n cmp vi_ptr_file+1\n bne @not_beginning\n\n@exit_beginning_of_the_file_is_reached:\n inx\n lda #$00\n ldy #$00\n rts\n\n@not_beginning:\n jmp @L1\n\n@exit:\n\n\n lda vi_ptr1\n bne @S2\n dec vi_ptr1+1\n@S2:\n dec vi_ptr1\n\n lda vi_ptr1\n cmp vi_ptr_file\n bne @not_beginning2\n\n lda vi_ptr1+1\n cmp vi_ptr_file+1\n bne @not_beginning2\n beq @exit_beginning_of_the_file_is_reached\n\n@not_beginning2:\n lda vi_ptr1\n ldy vi_ptr1+1\n\n rts\n.endproc\n```\n\n\n-------------------------------\n\nUpdated on 2022-12-15 at 11:48:27 +0100\n"
},
{
"alpha_fraction": 0.5156096816062927,
"alphanum_fraction": 0.5311211347579956,
"avg_line_length": 16.382251739501953,
"blob_id": "c360ca5e9b4aec396fab8a61f235ee67f164675a",
"content_id": "9636b4a57a84c90a15274f3bb4488c95ef1079e1",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Markdown",
"length_bytes": 5093,
"license_type": "no_license",
"max_line_length": 91,
"num_lines": 293,
"path": "/docs/tools_docs/vi/Files/vi__command__edition_8s.md",
"repo_name": "orix-software/orix-software.github.io",
"src_encoding": "UTF-8",
"text": "---\ntitle: /Routines/vi_command_edition.s\n\n---\n\n# /Routines/vi_command_edition.s\n\n\n\n## Routine\n\n| | Name |\n| -------------- | -------------- |\n| | **[vi_command_edition](Files/vi__command__edition_8s.md#Routine-vi-command-edition)** |\n\n\n## Routine documentation\n\n### Routine vi_command_edition\n\n```ca65\nvi_command_edition\n```\n\n\n\n\n## Source code\n\n```ca65\n.proc vi_command_edition\n\n jsr vi_clear_command_line\n ldy #vi_struct_data::xpos_command_line\n lda #$01\n sta (vi_struct),y\n\n lda #':'\n sta VI_COMMANDLINE_VIDEO_ADRESS\n\n ldy #vi_struct_data::command_line_buffer\n sta (vi_struct),y ; Insert :\n\n@read_keyboard:\n cgetc\n cmp #KEY_RIGHT\n beq @read_keyboard\n cmp #KEY_LEFT\n beq @read_keyboard\n cmp #KEY_UP\n beq @read_keyboard\n cmp #KEY_DOWN\n beq @read_keyboard\n cmp #KEY_ESC\n beq @exit\n cmp #KEY_DEL\n beq @del\n cmp #KEY_RETURN\n beq @execute_command_line\n sta vi_tmp1\n ldy #vi_struct_data::xpos_command_line\n lda (vi_struct),y\n cmp #39 ; End of line ?\n beq @read_keyboard\n tay\n lda vi_tmp1\n sta VI_COMMANDLINE_VIDEO_ADRESS,y\n\n\n\n iny\n tya\n ldy #vi_struct_data::xpos_command_line\n sta (vi_struct),y\n\n jmp @read_keyboard\n\n@exit:\n jsr vi_clear_command_line\n lda #$00\n rts\n@del:\n ldy #vi_struct_data::xpos_command_line\n lda (vi_struct),y\n cmp #$01\n beq @erase_char_and_exit\n ; Erase char on screen\n pha\n tay\n lda #' '\n dey\n sta VI_COMMANDLINE_VIDEO_ADRESS,y\n pla\n\n ; Erase char into command_line_buffer\n pha\n clc\n adc #vi_struct_data::command_line_buffer\n tay\n lda #$00 ; EOS\n sta (vi_struct),y ; Insert :\n pla\n\n ; dec pos\n tay\n dey\n tya\n ldy #vi_struct_data::xpos_command_line\n sta (vi_struct),y\n\n jmp @read_keyboard\n\n@erase_char_and_exit:\n lda #' '\n sta VI_COMMANDLINE_VIDEO_ADRESS\n lda #$00\n rts\n\n\n@execute_command_line:\n ldy #$01\n@read_commandline:\n sty vi_tmp2\n lda VI_COMMANDLINE_VIDEO_ADRESS,y\n cmp #'q'\n beq @exit_vi\n cmp #'w'\n beq @write_file\n cmp #KEY_ESC\n beq @key_esc\n lda #$00\n rts\n@key_esc:\n jsr vi_clear_command_line\n lda #$00\n rts\n@exit_vi:\n lda #$01\n rts\n@write_file:\n sty vi_tmp2\n\n ; Check if we have an arg (filename)\n\n iny\n\n lda VI_COMMANDLINE_VIDEO_ADRESS,y\n cmp #' ' ; If it's a space after w, then there is an arg\n beq @filename_in_argument\n\n@file_inserted:\n dey\n\n lda vi_fileopened\n beq @display_missing_filename\n\n\n; lda vi_struct+1\n; sta vi_arg1+1\n\n; lda #vi_struct_data::name_file_open\n; clc\n; adc vi_struct\n; bcc @no_inc_arg1\n; inc vi_arg1+1\n; @no_inc_arg1:\n; sta vi_arg1\n\n jsr @vi_open_and_write_file\n\n\n ; Clear\n\n jsr vi_clear_command_line\n\n ldy #$00\n@loop_written:\n lda msg_written,y\n beq @out\n\n sta VI_COMMANDLINE_VIDEO_ADRESS,y\n iny\n jmp @loop_written\n\n@out:\n ldy vi_tmp2\n iny\n jmp @read_commandline\n\n@filename_in_argument:\n iny ; skip space\n tya\n tax\n\n lda #$01\n sta vi_fileopened\n\n ldy #vi_struct_data::name_file_open\n@loop_copy_filename:\n lda VI_COMMANDLINE_VIDEO_ADRESS,x\n beq @exit_copy_filename\n cmp #' '\n beq @exit_copy_filename\n sta (vi_struct),y\n inx\n iny\n cpy #VI_MAX_LENGTH_FILENAME+vi_struct_data::name_file_open\n bne @loop_copy_filename\n\n\n@exit_copy_filename:\n lda #$00 ; EOS if overflow\n sta (vi_struct),y\n\n txa\n tay\n\n jmp @file_inserted\n\n\n\n@display_missing_filename:\n\n\n ldy #$00\n@loop_written2:\n lda msg_nofilename,y\n beq @out2\n\n sta VI_COMMANDLINE_VIDEO_ADRESS,y\n iny\n jmp @loop_written2\n\n@out2:\n rts\n\n\n@vi_open_and_write_file:\n\n\n\n lda vi_struct+1\n sta vi_ptr1+1\n\n lda #vi_struct_data::name_file_open\n clc\n adc vi_struct\n bcc @no_inc_vi_ptr1\n inc vi_ptr1+1\n@no_inc_vi_ptr1:\n sta vi_ptr1\n\n\n\n fopen (vi_ptr1), O_CREAT|O_WRONLY,,vi_fp\n cpx #$FF\n bne @opened_file\n cmp #$FF\n bne @opened_file\n beq @error_writing_file\n\n@opened_file:\n ; get length file\n ; ldy #vi_struct_data::length_file\n; lda (vi_struct),y\n ;sta vi_length_file\n ;iny\n ;lda (vi_struct),y\n ;sta vi_length_file+1\n ;dec vi_length_file\n; @me:\n; jmp @me\n fwrite (vi_ptr_file), (vi_length_file), 1, vi_fp ; myptr is from a malloc for example\n\n fclose(vi_fp)\n rts\n\n@error_writing_file:\n\n print str_error\n jmp @error_writing_file\n rts\n\nstr_error:\n .asciiz \"Error\"\n\n.endproc\n```\n\n\n-------------------------------\n\nUpdated on 2022-12-15 at 14:20:17 +0100\n"
},
{
"alpha_fraction": 0.6320939064025879,
"alphanum_fraction": 0.6477494835853577,
"avg_line_length": 14,
"blob_id": "fdd4d88caa0ff1d0ca0fc47a882b96efa07b6db5",
"content_id": "593f2163123da7366bb48dc6844fc919d056eafe",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Markdown",
"length_bytes": 511,
"license_type": "no_license",
"max_line_length": 48,
"num_lines": 34,
"path": "/docs/commands/strerr.md",
"repo_name": "orix-software/orix-software.github.io",
"src_encoding": "UTF-8",
"text": "# Command: strerr\n\n### strerr utility\n\n## SYNOPSYS\n+ strerr -h|-v|-a\n* strerr [-q] start[,end]\n* strerr [-a[q]] [start]\n\n## EXAMPLES\n+ strerr 1\n* strerr 1,4\n* strerr ,18\n* strerr -a 3\n\n## DESCRIPTION\n**strerr** return string describing error number\n\n## OPTIONS\n* -h\n display command syntax\n* -v\n display program version\n* -a\n display all message from start\n* -q\n quiet mode\n* start\n message number (def: 0)\n* end\n last message number (def: 0)\n\n## SOURCE\nhttps://github.com/orix-software/strerr\n\n"
},
{
"alpha_fraction": 0.519446849822998,
"alphanum_fraction": 0.5436473488807678,
"avg_line_length": 15.768115997314453,
"blob_id": "c302752e9f0511cb40d03e5ed8c6782182409cfb",
"content_id": "3bc46b76d9d79f9fb4232cf848df0c1024f31e87",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Markdown",
"length_bytes": 1157,
"license_type": "no_license",
"max_line_length": 112,
"num_lines": 69,
"path": "/doxygen/doxybook_output/Files/vi__ptr__last__char__add_8s.md",
"repo_name": "orix-software/orix-software.github.io",
"src_encoding": "UTF-8",
"text": "---\ntitle: /mnt/c/Users/plifp/OneDrive/oric/projets/orix-software/vi/src/functions/subfunc/vi/vi_ptr_last_char_add.s\n\n---\n\n# /mnt/c/Users/plifp/OneDrive/oric/projets/orix-software/vi/src/functions/subfunc/vi/vi_ptr_last_char_add.s\n\n\n\n## Functions\n\n| | Name |\n| -------------- | -------------- |\n| | **[vi_ptr_last_char_add](Files/vi__ptr__last__char__add_8s.md#function-vi-ptr-last-char-add)**() |\n\n\n## Functions Documentation\n\n### function vi_ptr_last_char_add\n\n```cpp\nvi_ptr_last_char_add()\n```\n\n\n\n\n## Source code\n\n```cpp\n.proc vi_ptr_last_char_add\n\n ; A and X the value to add\n pha\n txa\n ldy #vi_struct_data::ptr_last_char_file+1 ; 3737\n clc\n adc (vi_struct),y\n sta (vi_struct),y\n pla\n\n ldy #vi_struct_data::ptr_last_char_file\n\n sec\n sbc #$01\n\n clc\n adc (vi_struct),y\n\n bcc @do_not_inc_ptr_last_char\n pha\n iny\n lda (vi_struct),y\n clc\n adc #$01\n sta (vi_struct),y\n dey\n pla\n\n@do_not_inc_ptr_last_char:\n sta (vi_struct),y\n rts\n.endproc\n```\n\n\n-------------------------------\n\nUpdated on 2022-12-15 at 11:48:27 +0100\n"
},
{
"alpha_fraction": 0.5279831290245056,
"alphanum_fraction": 0.5522703528404236,
"avg_line_length": 16.867923736572266,
"blob_id": "cf673e37af1c6b890aa22ff52c8bb0b620a15c4f",
"content_id": "26349883165ab3e7294c66b6d919a781eb9621d9",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Markdown",
"length_bytes": 947,
"license_type": "no_license",
"max_line_length": 116,
"num_lines": 53,
"path": "/doxygen/doxybook_output/Files/vi__ptr__last__char__sub__sub_8s.md",
"repo_name": "orix-software/orix-software.github.io",
"src_encoding": "UTF-8",
"text": "---\ntitle: /mnt/c/Users/plifp/OneDrive/oric/projets/orix-software/vi/src/functions/subfunc/vi/vi_ptr_last_char_sub_sub.s\n\n---\n\n# /mnt/c/Users/plifp/OneDrive/oric/projets/orix-software/vi/src/functions/subfunc/vi/vi_ptr_last_char_sub_sub.s\n\n\n\n## Functions\n\n| | Name |\n| -------------- | -------------- |\n| | **[vi_ptr_last_char_sub_sub](Files/vi__ptr__last__char__sub__sub_8s.md#function-vi-ptr-last-char-sub-sub)**() |\n\n\n## Functions Documentation\n\n### function vi_ptr_last_char_sub_sub\n\n```cpp\nvi_ptr_last_char_sub_sub()\n```\n\n\n\n\n## Source code\n\n```cpp\n.proc vi_ptr_last_char_sub_sub\n ldy #vi_struct_data::ptr_last_char_file\n iny\n lda (vi_struct),y\n bne @dec\n sec\n sbc #$01\n sta (vi_struct),y\n\n@dec:\n dey\n lda (vi_struct),y\n sec\n sbc #$01\n sta (vi_struct),y\n rts\n.endproc\n```\n\n\n-------------------------------\n\nUpdated on 2022-12-15 at 11:48:27 +0100\n"
},
{
"alpha_fraction": 0.5061812996864319,
"alphanum_fraction": 0.526098906993866,
"avg_line_length": 14.326315879821777,
"blob_id": "7e3bdab648c9008baf1ec1f5768c25e293f8bb65",
"content_id": "f6f8220f110f6446e8c01c4f8272477db653f781",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Markdown",
"length_bytes": 1456,
"license_type": "no_license",
"max_line_length": 92,
"num_lines": 95,
"path": "/doxygen/doxybook_output/Files/vi__put__char_8s.md",
"repo_name": "orix-software/orix-software.github.io",
"src_encoding": "UTF-8",
"text": "---\ntitle: /mnt/c/Users/plifp/OneDrive/oric/projets/orix-software/vi/src/functions/vi_put_char.s\n\n---\n\n# /mnt/c/Users/plifp/OneDrive/oric/projets/orix-software/vi/src/functions/vi_put_char.s\n\n\n\n## Functions\n\n| | Name |\n| -------------- | -------------- |\n| | **[vi_put_char](Files/vi__put__char_8s.md#function-vi-put-char)**() |\n\n\n## Functions Documentation\n\n### function vi_put_char\n\n```cpp\nvi_put_char()\n```\n\n\n\n\n## Source code\n\n```cpp\n.proc vi_put_char\n\n cmp #$0A\n beq exit\n\n cmp #$0D\n beq @return_line\n\n pha\n\n jsr vi_compute_video_adress\n\n\n ldy #vi_struct_data::xpos_screen\n lda (vi_struct),y\n cmp #40\n bne @not_eol_onscreen\n\n jsr vi_ypos_screen_plus_plus\n jsr vi_compute_video_adress\n ldy #vi_struct_data::xpos_screen\n lda #$00\n sta (vi_struct),y\n\n\n@not_eol_onscreen:\n tay\n pla\n\n sta (vi_ptr_screen),y\n iny\n bne @do_not_inc_y\n\n@do_not_inc_y:\n tya\n ldy #vi_struct_data::xpos_screen\n sta (vi_struct),y\n\n rts\n\n@return_line:\n lda #$00\n ldy #vi_struct_data::xpos_screen\n sta (vi_struct),y\n\n ldy #vi_struct_data::file_number_of_line\n lda (vi_struct),y\n clc\n adc #$01\n sta (vi_struct),y\n\n\n jsr vi_ypos_screen_plus_plus\n\n rts\nexit:\n\n rts\n.endproc\n```\n\n\n-------------------------------\n\nUpdated on 2022-12-15 at 11:48:27 +0100\n"
},
{
"alpha_fraction": 0.7634854912757874,
"alphanum_fraction": 0.7634854912757874,
"avg_line_length": 29.25,
"blob_id": "c869fb4a93d596460a7e39e6f8bb6869da8bb42b",
"content_id": "18ab75438341922da4c9aa45c37a1077ce290d71",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Markdown",
"length_bytes": 241,
"license_type": "no_license",
"max_line_length": 87,
"num_lines": 8,
"path": "/docs/libs/twillib.md",
"repo_name": "orix-software/orix-software.github.io",
"src_encoding": "UTF-8",
"text": "# Twil libs\n\nManage rom/ram banks\n```c\nunsigned char twil_program_rambank(unsigned char bank, char *file, unsigned char set); \nunsigned char twil_clear_rambank(unsigned char bank, unsigned char set);\nunsigned char twil_lib_version(void);\n```"
},
{
"alpha_fraction": 0.7477116584777832,
"alphanum_fraction": 0.7534325122833252,
"avg_line_length": 46.24324417114258,
"blob_id": "c2b06e62b50dfd9125c07daffecc0cd5a7c71169",
"content_id": "806c00a71cccc3544a0bb0a69a45cc5d4b60c3b7",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Markdown",
"length_bytes": 1748,
"license_type": "no_license",
"max_line_length": 220,
"num_lines": 37,
"path": "/docs/developer_manual/pizero_connection.md",
"repo_name": "orix-software/orix-software.github.io",
"src_encoding": "UTF-8",
"text": "# Pi zero connection\n\nYou can also develop on the oric, if you have it next you and you have a pi zero with usb storage gadget configured\n\nMount under windows a drive which is pi zero with samba binary. Then when build your code, you just need to copy the binary to the right letter.\n\n For example : copy mycode /s/bin/\n\n /s/bin is pi zero folder, and you just need to type mycode at command line.\n\nYou need to execute in crontab a shell script. It flushs linux cache to disk. The problem with gadget is that it keeps action in memory, and in facts, samba does not show you the binary because, it's not written on disk.\n\n## How to install\n\n* Download img [Pi image](http://repo.orix.oric.org/orixpi.img.gz)\n\n* 'gzip -d' it or use 7zip\n\n* Download and install Win32 disk imager : [DiskImager](https://sourceforge.net/projects/win32diskimager/)\n\n* insert a sdcard into your PC, and write the orixpi.img on the sdcard with Win32 disk imager\n\n* remove sdcard from the pc and insert it to pizero when Win32 disk imager had finished\n\n* insert a usb cable in the *second* usb port (it's really important), and insert usb cable into the PC\n\n* Wait one minute, your PC should detect a new storage device (it's the pi)\n\n* in the new device, copy all files you need for Orix (sdcard.tgz content for example)\n\n* with an text editor, edit on this new mass storage : */etc/pizero/wpa/wpa.cnf* and insert the wifi network you use and the password\n\n* unplug the zero pi and insert it in the usb twilighte board port\n\n* switch on the oric with the board and wait 1 minutes\n\n* If everything is ok, you should see your files with \"ls\" and the pizero should be in your wifi network, you can now access to the network folder of pizero from your pc (with the right IP)\n"
},
{
"alpha_fraction": 0.5420792102813721,
"alphanum_fraction": 0.5891088843345642,
"avg_line_length": 16.565217971801758,
"blob_id": "054c988d95557ea7cda5a21df18848e75a402643",
"content_id": "fccbac48e96e5129fc9335f1b7c9eea73792a6a0",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Markdown",
"length_bytes": 404,
"license_type": "no_license",
"max_line_length": 112,
"num_lines": 23,
"path": "/doxygen/doxybook_output/Files/dir_e1568de7a9ec0caf269f7729a27efb24.md",
"repo_name": "orix-software/orix-software.github.io",
"src_encoding": "UTF-8",
"text": "---\ntitle: /mnt/c/Users/plifp/OneDrive/oric/projets/orix-software/vi/src/commands\n\n---\n\n# /mnt/c/Users/plifp/OneDrive/oric/projets/orix-software/vi/src/commands\n\n\n\n## Files\n\n| Name |\n| -------------- |\n| **[/mnt/c/Users/plifp/OneDrive/oric/projets/orix-software/vi/src/commands/vi.s](Files/vi_8s.md#file-vi.s)** |\n\n\n\n\n\n\n-------------------------------\n\nUpdated on 2022-12-15 at 11:48:27 +0100\n"
},
{
"alpha_fraction": 0.7290640473365784,
"alphanum_fraction": 0.7290640473365784,
"avg_line_length": 10.941176414489746,
"blob_id": "31c801b658cfe3a64026126edd40e4743550db16",
"content_id": "b152ce30412fa4c53fea842d6f846b42aa07a089",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Markdown",
"length_bytes": 203,
"license_type": "no_license",
"max_line_length": 73,
"num_lines": 17,
"path": "/docs/commands/mount.md",
"repo_name": "orix-software/orix-software.github.io",
"src_encoding": "UTF-8",
"text": "# mount\n\n## Introduction\n\nDisplays mounts\n\n## SYNOPSYS\n\n+ #mount\n\n## DESCRIPTION\n\nDisplays mount (usb key or sdcard)\n\n## SOURCE\n\nhttps://github.com/orix-software/shell/blob/master/src/commands/mount.asm\n"
},
{
"alpha_fraction": 0.45798319578170776,
"alphanum_fraction": 0.49159663915634155,
"avg_line_length": 10.526315689086914,
"blob_id": "f9cdd71912c2b7ddc901f702baafae50cbbe44bc",
"content_id": "c62e2a5fecd8c21bde10f18020abe3950b382c46",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Markdown",
"length_bytes": 238,
"license_type": "no_license",
"max_line_length": 39,
"num_lines": 19,
"path": "/docs/kernel/primitives/xdecim.md",
"repo_name": "orix-software/orix-software.github.io",
"src_encoding": "UTF-8",
"text": "# XDECIM\r\n\r\n## Usage\r\n\r\nDisplays an integer into the the screen\r\n\r\n## Example\r\n\r\n```ca65\r\n\r\n .include \"telestrat.inc\"\r\n\r\n ldy #$00\r\n ldx #$20 ;\r\n stx DEFAFF\r\n ldx #$00\r\n BRK_TELEMON XDECIM\r\n rts\r\n```\r\n"
},
{
"alpha_fraction": 0.6660350561141968,
"alphanum_fraction": 0.6660350561141968,
"avg_line_length": 122.29412078857422,
"blob_id": "671024270c657db11df0cc56169b953255c1308c",
"content_id": "dce2021b5f98f1de9ad0579006eb7c98b87fed24",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Markdown",
"length_bytes": 2111,
"license_type": "no_license",
"max_line_length": 423,
"num_lines": 17,
"path": "/docs/developer_manual/guidelines.md",
"repo_name": "orix-software/orix-software.github.io",
"src_encoding": "UTF-8",
"text": "# Guidelines\r\n\r\n## Ways to do a binary\r\n\r\nThere is many ways to buid a binary : bank or on storage\r\n\r\nTo summarize, the best speed shoud be read only bank, but it could a bit slow when program runs cd\r\n\r\n| type | Starting speed |Loading| kernel search command speed| Runtime speed |\r\n|-------------|--------------------------------------|-------|----------------------------|---------------|\r\n|Eeprom bank | :fontawesome-regular-star:{ .star } :fontawesome-regular-star:{ .star } :fontawesome-regular-star:{ .star } | :fontawesome-regular-star:{ .star } :fontawesome-regular-star:{ .star } :fontawesome-regular-star:{ .star }| :fontawesome-regular-star:{ .star } :fontawesome-regular-star:{ .star } :fontawesome-regular-star:{ .star } | :fontawesome-regular-star:{ .star } :fontawesome-regular-star:{ .star }\r\n|RAM bank | :fontawesome-regular-star:{ .star } :fontawesome-regular-star:{ .star } :fontawesome-regular-star:{ .star } | :fontawesome-regular-star:{ .star } :fontawesome-regular-star:{ .star } :fontawesome-regular-star:{ .star } |:fontawesome-regular-star:{ .star } :fontawesome-regular-star:{ .star } :fontawesome-regular-star:{ .star } | :fontawesome-regular-star:{ .star } :fontawesome-regular-star:{ .star }\r\n|Binary from /bin | :fontawesome-regular-star:{ .star } :fontawesome-regular-star:{ .star } | :fontawesome-regular-star:{ .star } :fontawesome-regular-star:{ .star } | :fontawesome-regular-star:{ .star } :fontawesome-regular-star:{ .star } | :fontawesome-regular-star:{ .star } :fontawesome-regular-star:{ .star } :fontawesome-regular-star:{ .star }\r\n\r\n* Eeprom bank : No loading, Best mode to find a command for the kernel and probably slower due to zp use and indirect mode to access memory.\r\n* RAM bank : No loading, second Best mode to find a command for the kernel and probably slower due to zp use and indirect mode to access memory.\r\n* Binary on the storage : loading, the worst way to find a command : kernel search into bank and after in the storage. Probably faster than bank (due to absolute mode in it's own buffers)."
},
{
"alpha_fraction": 0.6428571343421936,
"alphanum_fraction": 0.6964285969734192,
"avg_line_length": 10.199999809265137,
"blob_id": "fe39dac159efd67e3dddfa978a8950e255844e9f",
"content_id": "5500540d6f7549b47b9dcc66ad1e9d1dfd0b5a36",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Markdown",
"length_bytes": 168,
"license_type": "no_license",
"max_line_length": 39,
"num_lines": 15,
"path": "/docs/commands/blakes7.md",
"repo_name": "orix-software/orix-software.github.io",
"src_encoding": "UTF-8",
"text": "# Blakes7\n\nStart Blakes7\n\n/#blakes7\n\nIf you want to remove your saved game :\n\n/#cd /usr/share/blakes7\n\n/usr/share/blakes7# rm 136.o\n\n## Check the version\n\n/#blakes7 -v\n"
},
{
"alpha_fraction": 0.7207792401313782,
"alphanum_fraction": 0.7207792401313782,
"avg_line_length": 10.84615421295166,
"blob_id": "35312851b206f1e9d46c485df16fb4bc2ba186a3",
"content_id": "89f86454866e892fc157d94bd6995b9b182a68af",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Markdown",
"length_bytes": 154,
"license_type": "no_license",
"max_line_length": 71,
"num_lines": 13,
"path": "/docs/commands/pwd.md",
"repo_name": "orix-software/orix-software.github.io",
"src_encoding": "UTF-8",
"text": "# pwd\n\n## Introduction\n\n Display the current path\n\n## SYNOPSYS\n\n+ pwd\n\n## SOURCE\n\nhttps://github.com/orix-software/shell/blob/master/src/commands/pwd.asm\n"
},
{
"alpha_fraction": 0.5301944613456726,
"alphanum_fraction": 0.5557830333709717,
"avg_line_length": 16.763635635375977,
"blob_id": "61eb281128c6021a85e25fedfd9ae81d3d4f8197",
"content_id": "e6e44bf4fbd122d95ef45c6be1dfe1e5376b9f55",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Markdown",
"length_bytes": 977,
"license_type": "no_license",
"max_line_length": 121,
"num_lines": 55,
"path": "/doxygen/doxybook_output/Files/vi__ptr__last__char__plus__plus_8s.md",
"repo_name": "orix-software/orix-software.github.io",
"src_encoding": "UTF-8",
"text": "---\ntitle: /mnt/c/Users/plifp/OneDrive/oric/projets/orix-software/vi/src/functions/subfunc/vi/vi_ptr_last_char_plus_plus.s\n\n---\n\n# /mnt/c/Users/plifp/OneDrive/oric/projets/orix-software/vi/src/functions/subfunc/vi/vi_ptr_last_char_plus_plus.s\n\n\n\n## Functions\n\n| | Name |\n| -------------- | -------------- |\n| | **[vi_ptr_last_char_plus_plus](Files/vi__ptr__last__char__plus__plus_8s.md#function-vi-ptr-last-char-plus-plus)**() |\n\n\n## Functions Documentation\n\n### function vi_ptr_last_char_plus_plus\n\n```cpp\nvi_ptr_last_char_plus_plus()\n```\n\n\n\n\n## Source code\n\n```cpp\n.proc vi_ptr_last_char_plus_plus\n\n ldy #vi_struct_data::ptr_last_char_file\n lda (vi_struct),y\n clc\n adc #$01\n bcc @S1\n pha\n iny\n lda (vi_struct),y\n clc\n adc #$01\n sta (vi_struct),y\n dey\n pla\n@S1:\n sta (vi_struct),y\n rts\n.endproc\n```\n\n\n-------------------------------\n\nUpdated on 2022-12-15 at 11:48:27 +0100\n"
},
{
"alpha_fraction": 0.7202796936035156,
"alphanum_fraction": 0.7202796936035156,
"avg_line_length": 14.88888931274414,
"blob_id": "5649b56d8303c46f97399763875ec9b8b05cbf78",
"content_id": "f12e0cd50fb05915938591f7cc72909774655ee5",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Markdown",
"length_bytes": 429,
"license_type": "no_license",
"max_line_length": 177,
"num_lines": 27,
"path": "/docs/commands/man.md",
"repo_name": "orix-software/orix-software.github.io",
"src_encoding": "UTF-8",
"text": "# man\n\n## Introduction\n\ndisplay manual pages\n\n## SYNOPSYS\n\n+ man lsmem\n\n## DESCRIPTION\n\nDisplays manual. All .hlp files are located in /usr/share/man/. It manages multiples text screen (if .hlp is bigger than a screen when space is pressed, it switch to next page).\n\n## Keys\n\n* SPACE to switch to next page\n\n* ESC to exit\n\n## EXAMPLES\n\n+ man ls\n\n## SOURCE\n\nhttps://github.com/orix-software/shell/blob/master/src/commands/man.asm\n"
},
{
"alpha_fraction": 0.60628741979599,
"alphanum_fraction": 0.6227545142173767,
"avg_line_length": 17.58823585510254,
"blob_id": "bf63642763cceae9761bd87085dff19864a11585",
"content_id": "b0774456061a625946c707f2def40fb6d50eb3fc",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Markdown",
"length_bytes": 668,
"license_type": "no_license",
"max_line_length": 74,
"num_lines": 34,
"path": "/docs/developer_manual/orixsdk_macros/malloc.md",
"repo_name": "orix-software/orix-software.github.io",
"src_encoding": "UTF-8",
"text": "\r\n# Malloc\r\n\r\n## Description\r\n\r\nPerforms a malloc\r\n\r\n## usage\r\n\r\nmalloc size [,ptr] [,oom_msg_ptr] [,fail_value]\r\n\r\nmalloc #$0100\r\nmalloc (ptr)\r\nmalloc value\r\n\r\nNote:\r\n - if parameter 'ptr' is present, store resulting AY in ptr &ptr+1\r\n - if parameter 'oom_msg_ptr' is present, emit string pointed by\r\n 'oom_msg_ptr' and return if AY is null (ie malloc error)\r\n\r\n## Output\r\n\r\nA and Y contains the ptr. If A and Y are equal to 0, the malloc had failed\r\n\r\n## Example\r\n\r\n```ca65\r\n .include \"telestrat.inc\"\r\n .include \"../orix-sdk/macros/SDK_memory.mac\"\r\n\r\n malloc #$100\r\n rts\r\n```\r\n\r\nCall [XMALLOC](../../../kernel/primitives/xmalloc/) function\r\n"
},
{
"alpha_fraction": 0.6328740119934082,
"alphanum_fraction": 0.6791338324546814,
"avg_line_length": 20.577777862548828,
"blob_id": "082837c9fc9075b15ee2ec29eb314865a0aef29a",
"content_id": "7dfc4ce709c08ef3cd97b984dcb99d9e3ced07a9",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Markdown",
"length_bytes": 1016,
"license_type": "no_license",
"max_line_length": 83,
"num_lines": 45,
"path": "/docs/update/2023_2.md",
"repo_name": "orix-software/orix-software.github.io",
"src_encoding": "UTF-8",
"text": "# 2023.2\r\n\r\n## Kernel\r\n\r\n* XEXEC works with absolute path (/root/bin/toto can be launched)\r\n* XEXEC works with relative \"./\" syntax only (./toto can be launched)\r\n* *#!* script are now working\r\n* Fix bug when we switch to hires many times\r\n\r\n## Shell\r\n\r\n* Cleaning\r\n* Fix bug in lsmem (free chunk)\r\n* viewhrs can have an arg \"-t\" in order to specify a wait value before viewhrs exit\r\n\r\n## cc65\r\n\r\nfwrite and fread did not returns the right number of byte read or written\r\n\r\n## Tools\r\n\r\n* 'more' available\r\n* 'blakes7' is now in relocation format\r\n\r\n## Install\r\n\r\nDownload Kernel: https://repo.orix.oric.org/dists/official/tgz/6502/cardridge.tgz\r\n\r\nuntar/gunzip on the twilighte board device.\r\n\r\n### orixcfg (below 2023.2 orixcfg version)\r\n\r\nUnder Orix\r\n\r\n```bash\r\n/# cd /usr/share/carts/2023.2/\r\n/usr/share/carts/2023.2# orixcfg -r -s 4 kernelus.r64\r\n```\r\n\r\n## orixcfg (equal or greater than 2023.2 orixcfg version)\r\n\r\n```bash\r\n/# cd /usr/share/carts/2023.2/\r\n/usr/share/carts/2023.1# orixcfg -k kernelus.r64\r\n```\r\n"
},
{
"alpha_fraction": 0.7284768223762512,
"alphanum_fraction": 0.7284768223762512,
"avg_line_length": 10.615385055541992,
"blob_id": "5efa02d4d93c32da22637a0d51e29c95a8d6a3d7",
"content_id": "4affc6c253a98e5cc25ac49bc498fcb5f51779b2",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Markdown",
"length_bytes": 151,
"license_type": "no_license",
"max_line_length": 73,
"num_lines": 13,
"path": "/docs/commands/clear.md",
"repo_name": "orix-software/orix-software.github.io",
"src_encoding": "UTF-8",
"text": "# clear\n\n## Introduction\n\nClear the screen\n\n## SYNOPSYS\n\n+ clear\n\n## SOURCE\n\nhttps://github.com/orix-software/shell/blob/master/src/commands/clear.asm\n"
},
{
"alpha_fraction": 0.550561785697937,
"alphanum_fraction": 0.5750765800476074,
"avg_line_length": 17.471698760986328,
"blob_id": "4457c0798d8f5f134c408bab9fc2635b276d2654",
"content_id": "0e5dd807d09e09a225e9283a811210e65d72f6dc",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Markdown",
"length_bytes": 979,
"license_type": "no_license",
"max_line_length": 116,
"num_lines": 53,
"path": "/doxygen/doxybook_output/Files/vi__ptr__file__used__sub__sub_8s.md",
"repo_name": "orix-software/orix-software.github.io",
"src_encoding": "UTF-8",
"text": "---\ntitle: /mnt/c/Users/plifp/OneDrive/oric/projets/orix-software/vi/src/functions/subfunc/vi/vi_ptr_file_used_sub_sub.s\n\n---\n\n# /mnt/c/Users/plifp/OneDrive/oric/projets/orix-software/vi/src/functions/subfunc/vi/vi_ptr_file_used_sub_sub.s\n\n\n\n## Functions\n\n| | Name |\n| -------------- | -------------- |\n| | **[vi_ptr_file_used_sub_sub](Files/vi__ptr__file__used__sub__sub_8s.md#function-vi-ptr-file-used-sub-sub)**() |\n\n\n## Functions Documentation\n\n### function vi_ptr_file_used_sub_sub\n\n```cpp\nvi_ptr_file_used_sub_sub()\n```\n\n\n\n\n## Source code\n\n```cpp\n.proc vi_ptr_file_used_sub_sub\n\n jsr vi_check_beginning_of_file\n cmp #IS_BEGINNING_OF_THE_FILE\n beq @beginning_of_file\n\n lda vi_ptr_file_used ; 98\n bne @out\n dec vi_ptr_file_used+1\n@out:\n dec vi_ptr_file_used\n lda #$01\n rts\n\n@beginning_of_file:\n rts\n.endproc\n```\n\n\n-------------------------------\n\nUpdated on 2022-12-15 at 11:48:27 +0100\n"
},
{
"alpha_fraction": 0.48439821600914,
"alphanum_fraction": 0.6047548055648804,
"avg_line_length": 20.70967674255371,
"blob_id": "24a9f9719809aa9ef69f1a9091920179532759d1",
"content_id": "b481cbcc64ef8a34a8c97fed48913835c2ed1e28",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Markdown",
"length_bytes": 673,
"license_type": "no_license",
"max_line_length": 142,
"num_lines": 31,
"path": "/doxygen/doxybook_output_vi/Files/dir_6c260d28152e78a3ffcc2e06b7438967.md",
"repo_name": "orix-software/orix-software.github.io",
"src_encoding": "UTF-8",
"text": "---\ntitle: \n\n---\n\n# \n\n\n\n## Directories\n\n| Name |\n| -------------- |\n| **[/commands](Files/dir_e1568de7a9ec0caf269f7729a27efb24.md#dir-/mnt/c/users/plifp/onedrive/oric/projets/orix-software/vi/src/commands)** |\n| **[/data](Files/dir_eb94e028ad508402029845f2921e79f7.md#dir-/mnt/c/users/plifp/onedrive/oric/projets/orix-software/vi/src/data)** |\n| **[/Routines](Files/dir_2288eccfea1af74b995388678c757cc0.md#dir-/mnt/c/users/plifp/onedrive/oric/projets/orix-software/vi/src/Routines)** |\n\n## Files\n\n| Name |\n| -------------- |\n| **[/rom.s](Files/rom_8s.md#file-rom.s)** |\n\n\n\n\n\n\n-------------------------------\n\nUpdated on 2022-12-15 at 14:20:17 +0100\n"
},
{
"alpha_fraction": 0.41436463594436646,
"alphanum_fraction": 0.5138121843338013,
"avg_line_length": 9.647058486938477,
"blob_id": "d711777caebdc83390098243fb798a656e5b4e40",
"content_id": "e8e7806714d726b174b82a891b2eab738ecdbfa0",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Markdown",
"length_bytes": 181,
"license_type": "no_license",
"max_line_length": 65,
"num_lines": 17,
"path": "/doxygen/doxybook_output/index_classes.md",
"repo_name": "orix-software/orix-software.github.io",
"src_encoding": "UTF-8",
"text": "---\ntitle: Classes\n\n---\n\n# Classes\n\n\n\n\n* **struct [vi_struct_data](Classes/structvi__struct__data.md)** \n\n\n\n-------------------------------\n\nUpdated on 2022-12-15 at 11:48:27 +0100\n"
},
{
"alpha_fraction": 0.5281862616539001,
"alphanum_fraction": 0.5533088445663452,
"avg_line_length": 23.35820960998535,
"blob_id": "2a470b2770f9cb6e418f652ab8b912721417bd10",
"content_id": "2f6e13489a279add52f27b4791d224f64cab1d1c",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Markdown",
"length_bytes": 1632,
"license_type": "no_license",
"max_line_length": 123,
"num_lines": 67,
"path": "/docs/tools_docs/vi/Files/vi__struct_8s.md",
"repo_name": "orix-software/orix-software.github.io",
"src_encoding": "UTF-8",
"text": "---\ntitle: /Routines/vi_struct.s\n\n---\n\n# /Routines/vi_struct.s\n\n\n\n## Classes\n\n| | Name |\n| -------------- | -------------- |\n| struct | **[vi_struct_data](Structs/structvi__struct__data.md)** |\n\n## Defines\n\n| | Name |\n| -------------- | -------------- |\n| | **[VI_MAX_LENGTH_FILENAME](Files/vi__struct_8s.md#define-vi-max-length-filename)** |\n\n\n\n\n## Macros Documentation\n\n### define VI_MAX_LENGTH_FILENAME\n\n```ca65\n#define VI_MAX_LENGTH_FILENAME 16\n```\n\n\n## Source code\n\n```ca65\n\n.define VI_MAX_LENGTH_FILENAME 16\n\n.struct vi_struct_data\n xpos_screen .byte ; position x of the cursor on the screen\n ypos_screen .byte ; position y of the cursor on the screen\n pos_file_addr .word ; position on the file (address)\n pos_file .res 4 ; position in the file\n posx_command_line .byte ; position on command line\n name_file_open .res VI_MAX_LENGTH_FILENAME\n ;ptr_file_begin .word ; adress of the beginning of the file\n length_file .res 4 ; Length of the file\n pos_ptr_video_address .word\n file_number_of_line .res 4\n xpos_command_line .byte\n command_line_buffer .res 39\n ptr_last_char_file .word\n line_id .word\n xpos_text .byte\n ypos_text .byte\n.endstruct\n\n.if .sizeof(vi_struct_data) > 255\n .error \"vi_struct_data size is greater than 255. It's impossible because code does not handle a struct greater than 255\"\n.endif\n```\n\n\n-------------------------------\n\nUpdated on 2022-12-15 at 14:20:17 +0100\n"
},
{
"alpha_fraction": 0.782608687877655,
"alphanum_fraction": 0.782608687877655,
"avg_line_length": 17.399999618530273,
"blob_id": "383856e4f570e7f58fbeb4657671964232b18064",
"content_id": "77127a72a98298b19648848efc88dc6efaab8490",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Markdown",
"length_bytes": 92,
"license_type": "no_license",
"max_line_length": 50,
"num_lines": 5,
"path": "/docs/commands/barboric.md",
"repo_name": "orix-software/orix-software.github.io",
"src_encoding": "UTF-8",
"text": "# Barboric/Barbitoric demo\n\nLaunch barbitoric demo from orix (sdcard/usbdrive)\n\n/# barboric\n"
},
{
"alpha_fraction": 0.5803571343421936,
"alphanum_fraction": 0.5803571343421936,
"avg_line_length": 10.222222328186035,
"blob_id": "f0db85af8d048116bbf69cd961abe84152f6c800",
"content_id": "a479681f64a12c9865c8311936a3fc0b4e05d328",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Markdown",
"length_bytes": 112,
"license_type": "no_license",
"max_line_length": 46,
"num_lines": 9,
"path": "/docs/developer_manual/orixsdk_macros/prints.md",
"repo_name": "orix-software/orix-software.github.io",
"src_encoding": "UTF-8",
"text": "# prints macro\r\n\r\n## Description\r\n\r\n## Usage\r\n\r\n## Example\r\n\r\n.include \"../../orix-sdk/macros/SDK_print.mac\"\r\n\r\n"
},
{
"alpha_fraction": 0.48659002780914307,
"alphanum_fraction": 0.5287356376647949,
"avg_line_length": 11.428571701049805,
"blob_id": "71f3eb02fa2aca35524d3a688bfdd5fd25618f52",
"content_id": "f35e01c6fd604e88ca8a59c291f14743c91aa639",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Markdown",
"length_bytes": 261,
"license_type": "no_license",
"max_line_length": 38,
"num_lines": 21,
"path": "/docs/samples/c_samples/index.md",
"repo_name": "orix-software/orix-software.github.io",
"src_encoding": "UTF-8",
"text": "# Open and read files with cc65\n\n```c\n\n#include <stdio.h>\n\nunsigned char chars[255];\n\nmain (){\n fp=fopen(\"/myfile\",\"r\");\n if (fp==NULL)\n {\n printf(\"Can't open %s\\n\",argv[1]);\n return (1);\n } \n \n nb=fread(chars,255,1,fp);\n fclose(fp);\n}\n\n```\n"
},
{
"alpha_fraction": 0.4195979833602905,
"alphanum_fraction": 0.43467336893081665,
"avg_line_length": 11.655172348022461,
"blob_id": "eb0a2601f21a785ac753e9d0bb2dae9a0af648c5",
"content_id": "b1274dd09c2ecdd8a6ef5b746c47b66cc6a20e8e",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Markdown",
"length_bytes": 398,
"license_type": "no_license",
"max_line_length": 47,
"num_lines": 29,
"path": "/docs/developer_manual/tutorials/print_string.md",
"repo_name": "orix-software/orix-software.github.io",
"src_encoding": "UTF-8",
"text": "# Tutorial\r\n\r\n## Hello world\r\n\r\n=== \"C\"\r\n\r\n ``` c\r\n #include <stdio.h>\r\n\r\n int main(void) {\r\n printf(\"Hello world!\\n\");\r\n return 0;\r\n }\r\n ```\r\n\r\n=== \"Asm\"\r\n\r\n ```ca65\r\n\r\n .include \"telestrat.inc\"\r\n .include \"../orix-sdk/macros/SDK_print.mac\"\r\n\r\n start:\r\n print str_hello\r\n\r\n rts\r\n str_hello:\r\n .byte \"Hello world!\",$0A,$0D,0\r\n ```\r\n\r\n"
},
{
"alpha_fraction": 0.4649033546447754,
"alphanum_fraction": 0.49745675921440125,
"avg_line_length": 13.246376991271973,
"blob_id": "31225f5c2235b296474b49af9be77e4149da7b78",
"content_id": "6ff8ede9ae81cbd39f16c7d6ca5611ea16d31da6",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Markdown",
"length_bytes": 983,
"license_type": "no_license",
"max_line_length": 99,
"num_lines": 69,
"path": "/doxygen/doxybook_output_vi/Files/vi__ptr__last__char__add_8s.md",
"repo_name": "orix-software/orix-software.github.io",
"src_encoding": "UTF-8",
"text": "---\ntitle: vi_ptr_last_char_add.s\n\n---\n\n# vi_ptr_last_char_add.s\n\n\n\n## Routine\n\n| | Name |\n| -------------- | -------------- |\n| | **[vi_ptr_last_char_add](Files/vi__ptr__last__char__add_8s.md#Routine-vi-ptr-last-char-add)** |\n\n\n## Routine documentation\n\n### Routine vi_ptr_last_char_add\n\n```ca65\nvi_ptr_last_char_add\n```\n\n\n\n\n## Source code\n\n```ca65\n.proc vi_ptr_last_char_add\n\n ; A and X the value to add\n pha\n txa\n ldy #vi_struct_data::ptr_last_char_file+1 ; 3737\n clc\n adc (vi_struct),y\n sta (vi_struct),y\n pla\n\n ldy #vi_struct_data::ptr_last_char_file\n\n sec\n sbc #$01\n\n clc\n adc (vi_struct),y\n\n bcc @do_not_inc_ptr_last_char\n pha\n iny\n lda (vi_struct),y\n clc\n adc #$01\n sta (vi_struct),y\n dey\n pla\n\n@do_not_inc_ptr_last_char:\n sta (vi_struct),y\n rts\n.endproc\n```\n\n\n-------------------------------\n\nUpdated on 2022-12-15 at 14:20:17 +0100\n"
},
{
"alpha_fraction": 0.5318559408187866,
"alphanum_fraction": 0.5623268485069275,
"avg_line_length": 14.409090995788574,
"blob_id": "479ae13d6be7bafee509e0ba0b927cd302921082",
"content_id": "57521a07cf66dee75ef7f29fb80309ad0c628e9b",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Markdown",
"length_bytes": 361,
"license_type": "no_license",
"max_line_length": 68,
"num_lines": 22,
"path": "/docs/kernel/primitives/xreaddir.md",
"repo_name": "orix-software/orix-software.github.io",
"src_encoding": "UTF-8",
"text": "# XREADDIR\r\n\r\n## Usage\r\n\r\nRead current folder and returns all entry in AY.\r\n\r\nIt returns max : 100 entry (READDIR_MAX_LINE = 100 values in kernel)\r\n\r\n## Example\r\n\r\n```ca65\r\n .include \"telestrat.inc\"\r\n\r\n ldx #$01 ; XREADDIR MODE Mode\r\n BRK_TELEMON $2F\r\n rts\r\n@ok:\r\n ; Save ptr\r\n rts\r\n```\r\n\r\n!!! tip \"XREADDIR returns a ptr\"\r\n"
},
{
"alpha_fraction": 0.5295169949531555,
"alphanum_fraction": 0.5599284172058105,
"avg_line_length": 14.878787994384766,
"blob_id": "3e3cb9fd5ca320ccc49f282334003bfb8ef780ae",
"content_id": "582ebf15613511eb5fa516c0b02062fe5013281b",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Markdown",
"length_bytes": 559,
"license_type": "no_license",
"max_line_length": 77,
"num_lines": 33,
"path": "/docs/developer_manual/orixsdk_macros/strncat.md",
"repo_name": "orix-software/orix-software.github.io",
"src_encoding": "UTF-8",
"text": "\r\n# strncat\r\n\r\n## Description\r\n\r\nconcat 2 strings. Strings must be terminated by 0\r\n\r\n## Usage\r\n\r\nstrncat src, dest, n\r\n\r\n## Example\r\n\r\n```ca65\r\n .include \"telestrat.inc\"\r\n\r\n lda #<file_path\r\n sta ptr2\r\n lda #>file_path\r\n sta ptr2+1\r\n\r\n strncpy ptr2, ptr1, #20 ;strncpy(src, dest, n)\r\n\r\n lda files_type_loader_low\r\n sta ptr2\r\n lda files_type_loader_high\r\n sta ptr2+1\r\n\r\n\r\n strncat RESB, ptr1 , #13\r\n rts\r\n```\r\n\r\n!!! warning \"dest ptr will be changed by strncat. dest pointer must be saved\"\r\n"
},
{
"alpha_fraction": 0.5467625856399536,
"alphanum_fraction": 0.5707433819770813,
"avg_line_length": 12.379310607910156,
"blob_id": "b52d0673fb4c773e26112e2fc8b0cc089988bbfe",
"content_id": "af8bb2bd582f9bf7ef4ebf7683f08fd85052634d",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Markdown",
"length_bytes": 417,
"license_type": "no_license",
"max_line_length": 66,
"num_lines": 29,
"path": "/docs/developer_manual/orixsdk_macros/memmove.md",
"repo_name": "orix-software/orix-software.github.io",
"src_encoding": "UTF-8",
"text": "# Memmove macro\r\n\r\n## Description\r\n\r\nMove memory\r\n\r\n## Usage\r\n\r\nmemmove ptr_dst, src_start, src_len\r\n\r\nexit:\r\n - X: unused\r\n - AY: ptr_dst (A=LSB)\r\n\r\nnote:\r\n ptr may be: (ptr), address\r\n\r\n## Example\r\n\r\n```ca65\r\n .include \"telestrat.inc\"\r\n .include \"../orix-sdk/macros/SDK_memory.mac\"\r\n\r\n memmove $c080,$c000, 10\r\n\r\n rts\r\n```\r\n\r\nCall [XDECAL](../../../kernel/primitives/xdecal/) kernel function.\r\n"
},
{
"alpha_fraction": 0.5504807829856873,
"alphanum_fraction": 0.5625,
"avg_line_length": 12.857142448425293,
"blob_id": "4d892f8a2dbd223215cd2cbc9301714cb9d9a358",
"content_id": "05cd1b454d8a015eb566dfcd275714ce1d09c876",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Markdown",
"length_bytes": 416,
"license_type": "no_license",
"max_line_length": 96,
"num_lines": 28,
"path": "/docs/kernel/primitives/xputcwd.md",
"repo_name": "orix-software/orix-software.github.io",
"src_encoding": "UTF-8",
"text": "# XPUTCWD (chdir)\r\n\r\n## Description\r\n\r\nChange current process directory\r\n\r\n## Input\r\n\r\nA & Y the 16 bits values (string with \\0)\r\n\r\n## Output\r\n\r\nN/A\r\n\r\n## Example\r\n\r\n```ca65\r\n .include \"telestrat.inc\"\r\n\r\n lda #<str\r\n ldy #>str\r\n BRK_TELEMON XPUTCWD\r\n rts\r\nstr:\r\n .asciiz \"/etc\"\r\n```\r\n\r\n!!! tip \"See [chdir](../../developer_manual/orixsdk_macros/chdir) macro from orix-sdk to use it\"\r\n"
},
{
"alpha_fraction": 0.5507246255874634,
"alphanum_fraction": 0.5777778029441833,
"avg_line_length": 17.157894134521484,
"blob_id": "816ba9a84b71a21709e065c0b6a79c757cfa4322",
"content_id": "b1d55cf2bb552dd87adf975228f832c10cb87172",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Markdown",
"length_bytes": 1035,
"license_type": "no_license",
"max_line_length": 146,
"num_lines": 57,
"path": "/doxygen/doxybook_output/Files/vi__shift__line__left__to__right__editor_8s.md",
"repo_name": "orix-software/orix-software.github.io",
"src_encoding": "UTF-8",
"text": "---\ntitle: /mnt/c/Users/plifp/OneDrive/oric/projets/orix-software/vi/src/functions/subfunc/vi/vi_shift_line_left_to_right_editor.s\n\n---\n\n# /mnt/c/Users/plifp/OneDrive/oric/projets/orix-software/vi/src/functions/subfunc/vi/vi_shift_line_left_to_right_editor.s\n\n\n\n## Functions\n\n| | Name |\n| -------------- | -------------- |\n| | **[vi_shift_line_left_to_right_editor](Files/vi__shift__line__left__to__right__editor_8s.md#function-vi-shift-line-left-to-right-editor)**() |\n\n\n## Functions Documentation\n\n### function vi_shift_line_left_to_right_editor\n\n```cpp\nvi_shift_line_left_to_right_editor()\n```\n\n\n\n\n## Source code\n\n```cpp\n.proc vi_shift_line_left_to_right_editor\n ; A the line to scroll\n\n tay\n lda TABLE_LOW_TEXT,y\n sta vi_ptr1\n lda TABLE_HIGH_TEXT,y\n sta vi_ptr1+1\n\n\n ldy #38\n@L1:\n lda (vi_ptr1),y\n iny\n sta (vi_ptr1),y\n dey\n dey\n bpl @L1\n\n rts\n.endproc\n```\n\n\n-------------------------------\n\nUpdated on 2022-12-15 at 11:48:27 +0100\n"
},
{
"alpha_fraction": 0.596045196056366,
"alphanum_fraction": 0.6016949415206909,
"avg_line_length": 12.15999984741211,
"blob_id": "b163e0a4648f0908dfdf0fc24edde2808abd4a66",
"content_id": "ab4e3ba554c4fd08186f8f6c6dae3a208ba9634a",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Markdown",
"length_bytes": 354,
"license_type": "no_license",
"max_line_length": 64,
"num_lines": 25,
"path": "/docs/developer_manual/orixsdk_macros/exec.md",
"repo_name": "orix-software/orix-software.github.io",
"src_encoding": "UTF-8",
"text": "# Exec macro\r\n\r\n## Description\r\n\r\nExecute a binary\r\n\r\n## Usage\r\n\r\nexec command\r\n\r\ncommand may be : (ptr), address\r\n\r\n## Exammple\r\n\r\n```ca65\r\n .include \"telestrat.inc\"\r\n .include \"../orix-sdk/macros/SDK_process.mac\"\r\n\r\n exec mycommand\r\n rts\r\nmycommand:\r\n .asciiz ls\r\n```\r\n\r\nCall [XEXEC](../../../kernel/primitives/XEXEC/) kernel function.\r\n"
},
{
"alpha_fraction": 0.5971428751945496,
"alphanum_fraction": 0.6028571724891663,
"avg_line_length": 14.571428298950195,
"blob_id": "7548ac927a003075d63bda80252eab8b0348fa08",
"content_id": "0a87d61c68d710a9741e857e1112eb47dedb5338",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Markdown",
"length_bytes": 350,
"license_type": "no_license",
"max_line_length": 112,
"num_lines": 21,
"path": "/docs/developer_manual/orixsdk_macros/cursor.md",
"repo_name": "orix-software/orix-software.github.io",
"src_encoding": "UTF-8",
"text": "# Cursor\r\n\r\n## Description\r\n\r\nSwitch on or switch off the cursor\r\n\r\n## Usage\r\n\r\ncursor ON|OFF\r\n\r\n## Example\r\n\r\n```ca65\r\n .include \"telestrat.inc\"\r\n .include \"../orix-sdk/macros/SDK_display.mac\"\r\n\r\n cursor ON\r\n rts\r\n```\r\n\r\nCall [XCSSCR](../../../kernel/primitives/xcsscr) or [XCOSCR](../../../kernel/primitives/xcoscr) kernel function.\r\n\r\n"
},
{
"alpha_fraction": 0.5571955442428589,
"alphanum_fraction": 0.5867158770561218,
"avg_line_length": 17.066667556762695,
"blob_id": "f6e2196c0a1ef3423323db4b4b6923bb394f4af3",
"content_id": "b34424ba30c35cb4e13ad09fc329ae47b9eab10a",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Markdown",
"length_bytes": 813,
"license_type": "no_license",
"max_line_length": 113,
"num_lines": 45,
"path": "/doxygen/doxybook_output/Files/vi__clear__command__line_8s.md",
"repo_name": "orix-software/orix-software.github.io",
"src_encoding": "UTF-8",
"text": "---\ntitle: /mnt/c/Users/plifp/OneDrive/oric/projets/orix-software/vi/src/functions/subfunc/vi/vi_clear_command_line.s\n\n---\n\n# /mnt/c/Users/plifp/OneDrive/oric/projets/orix-software/vi/src/functions/subfunc/vi/vi_clear_command_line.s\n\n\n\n## Functions\n\n| | Name |\n| -------------- | -------------- |\n| | **[vi_clear_command_line](Files/vi__clear__command__line_8s.md#function-vi-clear-command-line)**() |\n\n\n## Functions Documentation\n\n### function vi_clear_command_line\n\n```cpp\nvi_clear_command_line()\n```\n\n\n\n\n## Source code\n\n```cpp\n.proc vi_clear_command_line\n ldx #40\n lda #$00 ; Set command line with 0\n@loop:\n sta VI_COMMANDLINE_VIDEO_ADRESS,x\n dex\n bpl @loop\n rts\n.endproc\n```\n\n\n-------------------------------\n\nUpdated on 2022-12-15 at 11:48:27 +0100\n"
},
{
"alpha_fraction": 0.4747474789619446,
"alphanum_fraction": 0.5454545617103577,
"avg_line_length": 23.75,
"blob_id": "1e51ecd52401ad447447da12be93fdec7bc419d2",
"content_id": "08b88855b8ed0809f14b219ac72c7c28df94bb75",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "JavaScript",
"length_bytes": 99,
"license_type": "no_license",
"max_line_length": 77,
"num_lines": 4,
"path": "/doxygen/doc/html/search/files_0.js",
"repo_name": "orix-software/orix-software.github.io",
"src_encoding": "UTF-8",
"text": "var searchData=\n[\n ['_5fclrscr_5fvi_2es_88',['_clrscr_vi.s',['../__clrscr__vi_8s.html',1,'']]]\n];\n"
},
{
"alpha_fraction": 0.5388127565383911,
"alphanum_fraction": 0.5652968287467957,
"avg_line_length": 16.109375,
"blob_id": "7aa9d9c657e0e9b722f65d619bd086b03e5fea0b",
"content_id": "19f75581896a361e146a615507c2fb61942da47c",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Markdown",
"length_bytes": 1095,
"license_type": "no_license",
"max_line_length": 133,
"num_lines": 64,
"path": "/doxygen/doxybook_output/Files/vi__fill__screen__with__empty__line_8s.md",
"repo_name": "orix-software/orix-software.github.io",
"src_encoding": "UTF-8",
"text": "---\ntitle: /mnt/c/Users/plifp/OneDrive/oric/projets/orix-software/vi/src/functions/vi_fill_screen_with_empty_line.s\n\n---\n\n# /mnt/c/Users/plifp/OneDrive/oric/projets/orix-software/vi/src/functions/vi_fill_screen_with_empty_line.s\n\n\n\n## Functions\n\n| | Name |\n| -------------- | -------------- |\n| | **[vi_fill_screen_with_empty_line](Files/vi__fill__screen__with__empty__line_8s.md#function-vi-fill-screen-with-empty-line)**() |\n\n\n## Functions Documentation\n\n### function vi_fill_screen_with_empty_line\n\n```cpp\nvi_fill_screen_with_empty_line()\n```\n\n\n\n\n## Source code\n\n```cpp\n.proc vi_fill_screen_with_empty_line\n ; X the first line\n\n cpy #$00\n beq @out\n\n stx vi_tmp1\n ldx #VI_LAST_LINE_EDITOR\n\n ldy #$00\n\n@loop:\n lda TABLE_LOW_TEXT,x\n sta vi_ptr1\n lda TABLE_HIGH_TEXT,x\n sta vi_ptr1+1\n\n@skip:\n lda #VI_EDITOR_CHAR_LIMITS_EMPTY\n\n sta (vi_ptr1),y\n\n dex\n cpx vi_tmp1\n bpl @loop\n@out:\n rts\n.endproc\n```\n\n\n-------------------------------\n\nUpdated on 2022-12-15 at 11:48:27 +0100\n"
},
{
"alpha_fraction": 0.4653061330318451,
"alphanum_fraction": 0.5142857432365417,
"avg_line_length": 11.189188957214355,
"blob_id": "7a2e5dfe93a42b7127ae9f3e948ebb65dae20d19",
"content_id": "821a1a609334c243c127f1a276146594c96b1e56",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Markdown",
"length_bytes": 490,
"license_type": "no_license",
"max_line_length": 52,
"num_lines": 37,
"path": "/kernel/docs/primitives/xbindx.md",
"repo_name": "orix-software/orix-software.github.io",
"src_encoding": "UTF-8",
"text": "# XBINDX (16 bytes to decimal)\r\n\r\n## Description\r\n\r\nconvert A & Y number into decimal\r\n\r\n## Input\r\n\r\nA & Y the 16 bits values\r\n\r\nTR5 the pointer of the address to put decimal number\r\n\r\nDEFAFF : space before the number\r\n\r\nX : xxx\r\n\r\n## Output\r\n\r\nA,TR4, TR5\r\n\r\n## Example\r\n\r\n``` ca65\r\n\tlda #<$bb80\r\n\tsta TR5\r\n\tlda #>$bb80\r\n\tsta TR5+1\r\n\r\n\tlda #$20\r\n\tsta DEFAFF\r\n\r\n\tldx\t\t #$01\r\n\tldy #$00\r\n\tlda #$10\r\n BRK_KERNEL XBINDX\r\n\trts\r\n```\r\n\r\n"
},
{
"alpha_fraction": 0.6595744490623474,
"alphanum_fraction": 0.6595744490623474,
"avg_line_length": 7.400000095367432,
"blob_id": "264165c37cc6f4f30689f9fd0862bd37460ecbfa",
"content_id": "938f90c84d6487717a5bc2f666681bb65d9b0cf4",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Markdown",
"length_bytes": 47,
"license_type": "no_license",
"max_line_length": 15,
"num_lines": 5,
"path": "/docs/kernel/primitives/xcoscr.md",
"repo_name": "orix-software/orix-software.github.io",
"src_encoding": "UTF-8",
"text": "# XCOSCR\r\n\r\n## Description\r\n\r\nDisplays cursor\r\n"
},
{
"alpha_fraction": 0.738095223903656,
"alphanum_fraction": 0.738095223903656,
"avg_line_length": 27.714284896850586,
"blob_id": "14ec59b29436a74356d91ef6d43711902381a502",
"content_id": "5b049ea2352ea9c5b07ad43c0eaa27c2d4bba2ce",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Markdown",
"length_bytes": 210,
"license_type": "no_license",
"max_line_length": 100,
"num_lines": 7,
"path": "/docs/hardware/synth_vocal_mea80900.md",
"repo_name": "orix-software/orix-software.github.io",
"src_encoding": "UTF-8",
"text": "# Voice synthesizer compatibility\r\n\r\nVoice synthetizer works with the board. It needs a silicebit amplibus to have the extension working.\r\n\r\n## How to plug with the board\r\n\r\n\r\n\r\n"
},
{
"alpha_fraction": 0.7162162065505981,
"alphanum_fraction": 0.7548262476921082,
"avg_line_length": 55.33333206176758,
"blob_id": "96c493aa520564b0d23703439f85fff3913fdb8a",
"content_id": "79de6064c299417c8e3bf09c85e5df8b95fd46a7",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Markdown",
"length_bytes": 518,
"license_type": "no_license",
"max_line_length": 176,
"num_lines": 9,
"path": "/docs/developer_manual/cc65.md",
"repo_name": "orix-software/orix-software.github.io",
"src_encoding": "UTF-8",
"text": "# Cc65 use\r\n\r\ncc65 and ca65 are used mainly for kernel, shell and others binaries. Any assembler can be used, it just needs to add the Orix binary header at the beginning of the binary file.\r\n\r\nIt's easier to use ca65 because orix sdk provide ca65 macro and .cfg to build a binary when it's coded in assembly only.\r\n\r\nSome libs are also build under ca65, and it's easier to link with ca65 if your code is written with ca65.\r\n\r\nIf you code with cc65 or ca65, you need to use -ttelestrat switch to build Orix binary.\r\n\r\n"
},
{
"alpha_fraction": 0.6625683307647705,
"alphanum_fraction": 0.6721311211585999,
"avg_line_length": 21.612903594970703,
"blob_id": "3c331e889aca1a6e4eb29f312b2f5c9670d7b679",
"content_id": "f7238942b8c0adf3b55bac619944d08b7d498729",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Markdown",
"length_bytes": 732,
"license_type": "no_license",
"max_line_length": 161,
"num_lines": 31,
"path": "/kernel/docs/primitives/xexec.md",
"repo_name": "orix-software/orix-software.github.io",
"src_encoding": "UTF-8",
"text": "# XEXEC\r\n\r\n## Description\r\n\r\nStart a binary (located in a ROM or on the current device)\r\n\r\n## Input\r\n\r\nA & Y the ptr of the string.\r\n\r\n## Output\r\n\r\nReturns an error if the binary is not found\r\n\r\n# Modification :\r\n* RES, RESB (kernel_create_process), TR0, TR1, TR4 (kernel_create_process), TR5 (kernel_create_process)\r\n* KERNEL_ERRNO (kernel_create_process), KERNEL_XKERNEL_CREATE_PROCESS_TMP (kernel_create_process),kernel_process_struct::kernel_pid_list (kernel_create_process)\r\n* DECDEB, DECFIN, DECCIB,DECTRV,ACC1M\r\n* VEXBNK, BUFEDT, BNKOLD, KERNEL_TMP_XEXEC, BNK_TO_SWITCH, KERNEL_KERNEL_XEXEC_BNKOLD\r\n\r\n\r\n## Example\r\n\r\n``` ca65\r\n lda #<str\r\n ldy #>str\r\n BRK_KERNEL XEXEC\r\n rts\r\nstr:\r\n .asciiz \"mybin\" \r\n```\r\n"
},
{
"alpha_fraction": 0.4984423816204071,
"alphanum_fraction": 0.5327102541923523,
"avg_line_length": 14.046875,
"blob_id": "9b3bfbde696567f1dbd3f129ec44534f65642425",
"content_id": "0a3e3f1e78259ad781c8e7b35e2886ccdf7e2e52",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Markdown",
"length_bytes": 963,
"license_type": "no_license",
"max_line_length": 130,
"num_lines": 64,
"path": "/doxygen/doxybook_output_vi/Files/vi__fill__screen__with__empty__line_8s.md",
"repo_name": "orix-software/orix-software.github.io",
"src_encoding": "UTF-8",
"text": "---\ntitle: /Routines/vi_fill_screen_with_empty_line.s\n\n---\n\n# /Routines/vi_fill_screen_with_empty_line.s\n\n\n\n## Routine\n\n| | Name |\n| -------------- | -------------- |\n| | **[vi_fill_screen_with_empty_line](Files/vi__fill__screen__with__empty__line_8s.md#Routine-vi-fill-screen-with-empty-line)** |\n\n\n## Routine documentation\n\n### Routine vi_fill_screen_with_empty_line\n\n```ca65\nvi_fill_screen_with_empty_line\n```\n\n\n\n\n## Source code\n\n```ca65\n.proc vi_fill_screen_with_empty_line\n ; X the first line\n\n cpy #$00\n beq @out\n\n stx vi_tmp1\n ldx #VI_LAST_LINE_EDITOR\n\n ldy #$00\n\n@loop:\n lda TABLE_LOW_TEXT,x\n sta vi_ptr1\n lda TABLE_HIGH_TEXT,x\n sta vi_ptr1+1\n\n@skip:\n lda #VI_EDITOR_CHAR_LIMITS_EMPTY\n\n sta (vi_ptr1),y\n\n dex\n cpx vi_tmp1\n bpl @loop\n@out:\n rts\n.endproc\n```\n\n\n-------------------------------\n\nUpdated on 2022-12-15 at 14:20:17 +0100\n"
},
{
"alpha_fraction": 0.5657894611358643,
"alphanum_fraction": 0.5870445370674133,
"avg_line_length": 19.58333396911621,
"blob_id": "15d9dc87b43ad22acfff985966d65bdf83d4f020",
"content_id": "b7d5b7eaf377185e6ef4deceb8ff25e0a6961e91",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Markdown",
"length_bytes": 989,
"license_type": "no_license",
"max_line_length": 111,
"num_lines": 48,
"path": "/doxygen/doxybook_output/Files/vi__add__char__to__text_8s.md",
"repo_name": "orix-software/orix-software.github.io",
"src_encoding": "UTF-8",
"text": "---\ntitle: /mnt/c/Users/plifp/OneDrive/oric/projets/orix-software/vi/src/functions/subfunc/vi/vi_add_char_to_text.s\n\n---\n\n# /mnt/c/Users/plifp/OneDrive/oric/projets/orix-software/vi/src/functions/subfunc/vi/vi_add_char_to_text.s\n\n\n\n## Functions\n\n| | Name |\n| -------------- | -------------- |\n| | **[vi_add_char_to_text](Files/vi__add__char__to__text_8s.md#function-vi-add-char-to-text)**() |\n\n\n## Functions Documentation\n\n### function vi_add_char_to_text\n\n```cpp\nvi_add_char_to_text()\n```\n\n\n\n\n## Source code\n\n```cpp\n.proc vi_add_char_to_text\n ; Ajoute un caractère dans le texte\n pha\n jsr vi_ptr_last_char_plus_plus\n jsr vi_shift_file_from_memory_one_char ; shift one char the text file in the memory\n ldy #$00\n pla\n sta (vi_ptr_file_used),y ; store \\n\n jsr vi_ptr_file_used_plus_plus\n jsr vi_length_file_plus_plus\n rts\n.endproc\n```\n\n\n-------------------------------\n\nUpdated on 2022-12-15 at 11:48:27 +0100\n"
},
{
"alpha_fraction": 0.5598958134651184,
"alphanum_fraction": 0.5755208134651184,
"avg_line_length": 12.769230842590332,
"blob_id": "6d10b92295e91ae196701348a5fe6073b32ee7a6",
"content_id": "6aeb2059865520f1088595b326ae1c999ef1b5f0",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Markdown",
"length_bytes": 384,
"license_type": "no_license",
"max_line_length": 46,
"num_lines": 26,
"path": "/docs/developer_manual/orixsdk_macros/fseek.md",
"repo_name": "orix-software/orix-software.github.io",
"src_encoding": "UTF-8",
"text": "# fseek\r\n\r\n## Description\r\n\r\nMove into opened file\r\n\r\n## Usage\r\n\r\nfseek fp, offset, whence\r\n\r\nfp may be : (ptr), address\r\noffset may be: (ptr), address, constant\r\nwhence may be : address, #value\r\n\r\n\r\n## Example\r\n\r\n```ca65\r\n .include \"telestrat.inc\"\r\n .include \"../orix-sdk/macros/SDK_file.mac\"\r\n\r\n [...]\r\n fseek (myfp), 1080, #SEEK_CUR\r\n fclose(MAN_FP)\r\n rts\r\n```\r\n"
},
{
"alpha_fraction": 0.5521653294563293,
"alphanum_fraction": 0.5777559280395508,
"avg_line_length": 18.538461685180664,
"blob_id": "389283992859e000e253d3908d260db9e774de05",
"content_id": "d1a575253411e8fa8c3a9b3596ab05c4c678998f",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Markdown",
"length_bytes": 1016,
"license_type": "no_license",
"max_line_length": 114,
"num_lines": 52,
"path": "/doxygen/doxybook_output/Files/vi__xpos__screen__sub__sub_8s.md",
"repo_name": "orix-software/orix-software.github.io",
"src_encoding": "UTF-8",
"text": "---\ntitle: /mnt/c/Users/plifp/OneDrive/oric/projets/orix-software/vi/src/functions/subfunc/vi/vi_xpos_screen_sub_sub.s\n\n---\n\n# /mnt/c/Users/plifp/OneDrive/oric/projets/orix-software/vi/src/functions/subfunc/vi/vi_xpos_screen_sub_sub.s\n\n\n\n## Functions\n\n| | Name |\n| -------------- | -------------- |\n| | **[vi_xpos_screen_sub_sub](Files/vi__xpos__screen__sub__sub_8s.md#function-vi-xpos-screen-sub-sub)**() |\n\n\n## Functions Documentation\n\n### function vi_xpos_screen_sub_sub\n\n```cpp\nvi_xpos_screen_sub_sub()\n```\n\n\n\n\n## Source code\n\n```cpp\n.proc vi_xpos_screen_sub_sub\n ;; vi_xpos_screen_sub_sub\n ; Return 00 if we are on the first line\n ; xpos_screen=ypos_screen+1\n ldy #vi_struct_data::xpos_screen\n lda (vi_struct),y\n beq @no_substract\n sec\n sbc #$01\n sta (vi_struct),y\n lda #$01\n rts\n@no_substract:\n lda #IS_BEGINNING_OF_THE_LINE\n rts\n.endproc\n```\n\n\n-------------------------------\n\nUpdated on 2022-12-15 at 11:48:27 +0100\n"
},
{
"alpha_fraction": 0.4513295888900757,
"alphanum_fraction": 0.48036107420921326,
"avg_line_length": 16.89956283569336,
"blob_id": "a28c70c98d0f1cfabc5fccf6a9f91a865a463925",
"content_id": "79d65ffc385b7f5a7ee83a427cd1f3410b7098e5",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Markdown",
"length_bytes": 4099,
"license_type": "no_license",
"max_line_length": 103,
"num_lines": 229,
"path": "/docs/tools_docs/vi/Files/rom_8s.md",
"repo_name": "orix-software/orix-software.github.io",
"src_encoding": "UTF-8",
"text": "---\ntitle: /rom.s\n\n---\n\n# /rom.s\n\n\n\n## Attributes\n\n| | Name |\n| -------------- | -------------- |\n| const char | **[userzp](Files/rom_8s.md#variable-userzp)** |\n| const char | **[NULL](Files/rom_8s.md#variable-null)** |\n| const char[3] | **[list_of_commands_bank](Files/rom_8s.md#variable-list-of-commands-bank)** |\n| int | **[addr_commands](Files/rom_8s.md#variable-addr-commands)** |\n| const char[10] | **[signature](Files/rom_8s.md#variable-signature)** |\n| char[$FFF0- *] | **[end_rom](Files/rom_8s.md#variable-end-rom)** |\n| char[2] | **[parse_vector](Files/rom_8s.md#variable-parse-vector)** |\n| unsigned int | **[signature_adress_commands](Files/rom_8s.md#variable-signature-adress-commands)** |\n| unsigned int | **[list_commands](Files/rom_8s.md#variable-list-commands)** |\n| char | **[number_of_commands](Files/rom_8s.md#variable-number-of-commands)** |\n| int | **[copyright](Files/rom_8s.md#variable-copyright)** |\n| int | **[NMI](Files/rom_8s.md#variable-nmi)** |\n| int | **[RESET](Files/rom_8s.md#variable-reset)** |\n| int | **[BRK_IRQ](Files/rom_8s.md#variable-brk-irq)** |\n\n\n\n## Attributes Documentation\n\n### variable userzp\n\n```ca65\nconst char userzp = $80;\n```\n\n\n### variable NULL\n\n```ca65\nconst char NULL = 0;\n```\n\n\n### variable list_of_commands_bank\n\n```ca65\nconst char[3] list_of_commands_bank = \"vi\";\n```\n\n\n### variable addr_commands\n\n```ca65\nint addr_commands;\n```\n\n\n### variable signature\n\n```ca65\nconst char[10] signature = \"vi 2022.4\";\n```\n\n\n### variable end_rom\n\n```ca65\nchar[$FFF0- *] end_rom;\n```\n\n\n### variable parse_vector\n\n```ca65\nchar[2] parse_vector;\n```\n\n\n### variable signature_adress_commands\n\n```ca65\nunsigned int signature_adress_commands;\n```\n\n\n### variable list_commands\n\n```ca65\nunsigned int list_commands;\n```\n\n\n### variable number_of_commands\n\n```ca65\nchar number_of_commands;\n```\n\n\n### variable copyright\n\n```ca65\nint copyright;\n```\n\n\n### variable NMI\n\n```ca65\nint NMI;\n```\n\n\n### variable RESET\n\n```ca65\nint RESET;\n```\n\n\n### variable BRK_IRQ\n\n```ca65\nint BRK_IRQ;\n```\n\n\n\n## Source code\n\n```ca65\n;----------------------------------------------------------------------\n; cc65 includes\n;----------------------------------------------------------------------\n.include \"telestrat.inc\" ; from cc65\n.include \"fcntl.inc\" ; from cc65\n.include \"errno.inc\" ; from cc65\n.include \"cpu.mac\" ; from cc65\n\n\n;----------------------------------------------------------------------\n; Orix SDK includes\n;----------------------------------------------------------------------\n.include \"../dependencies/orix-sdk/macros/SDK.mac\"\n.include \"../dependencies/orix-sdk/include/SDK.inc\"\n.include \"../dependencies/orix-sdk/include/keyboard.inc\"\n\n\n\n\n;----------------------------------------------------------------------\n; Zero Page\n;----------------------------------------------------------------------\nuserzp := $80\nNULL = 0\n\n;----------------------------------------------------------------------\n; Shell\n;----------------------------------------------------------------------\n.org $C000\n\n.code\nstart:\nrts\n\n\n\n\n.include \"commands/vi.s\"\n\n\n\nlist_of_commands_bank:\n .asciiz \"vi\"\n\naddr_commands:\n .word _vi\n\nsignature:\n .asciiz \"vi 2022.4\"\n\n\nend_rom:\n\n\n;----------------------------------------------------------------------\n;\n;----------------------------------------------------------------------\n.out .sprintf(\"Size of ROM : %d bytes\", end_rom-$c000)\n\n\n .res $FFF0-*\n .org $FFF0\n.byt 1 ; Command ROM\n; $fff1\nparse_vector:\n .byt $00,$00\n; fff3\nsignature_adress_commands:\n .addr addr_commands\n; fff5-fff6\nlist_commands:\n .addr list_of_commands_bank\n; fff7\nnumber_of_commands:\n .byt 1\n\n; fff8-fff9\ncopyright:\n .word signature\n; fffa-fffb\nNMI:\n .word start\n\n; fffc-fffd\nRESET:\n .word start\n; fffe-ffff\nBRK_IRQ:\n .word IRQVECTOR\n```\n\n\n-------------------------------\n\nUpdated on 2022-12-15 at 14:20:17 +0100\n"
},
{
"alpha_fraction": 0.699999988079071,
"alphanum_fraction": 0.699999988079071,
"avg_line_length": 18.66666603088379,
"blob_id": "c306e1451abe1dbfd77a14d51c16d967c22e5678",
"content_id": "a777edf5d3950732be7811479f3f9dd5706e3f43",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Markdown",
"length_bytes": 60,
"license_type": "no_license",
"max_line_length": 30,
"num_lines": 3,
"path": "/kernel/docs/return.md",
"repo_name": "orix-software/orix-software.github.io",
"src_encoding": "UTF-8",
"text": "# Return to main documentation\n\n* [Main documentation ](/)\n\n"
},
{
"alpha_fraction": 0.5807860493659973,
"alphanum_fraction": 0.5895196795463562,
"avg_line_length": 14.357142448425293,
"blob_id": "118b7216ba27f66ea310cb9ba4b731036f12a909",
"content_id": "c53d796ad138a36d403ae62ee29fe9befdf4e54d",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Markdown",
"length_bytes": 229,
"license_type": "no_license",
"max_line_length": 97,
"num_lines": 14,
"path": "/docs/kernel/primitives/xcrlf.md",
"repo_name": "orix-software/orix-software.github.io",
"src_encoding": "UTF-8",
"text": "# XCRLF\r\n\r\nReturns to next line.\r\n\r\n## Example\r\n\r\n```ca65\r\n .include \"telestrat.inc\"\r\n\r\n BRK_TELEMON XCRLF\r\n rts\r\n```\r\n\r\n!!! tip \"See [crlf](../../../developer_manual/orixsdk_macros/crlf) macro from orix-sdk to use it\"\r\n"
},
{
"alpha_fraction": 0.5811277031898499,
"alphanum_fraction": 0.6238139271736145,
"avg_line_length": 32.587059020996094,
"blob_id": "2780b4e38cab3ba7e020983da8f851999aae37ac",
"content_id": "3ea2f9a7414636861bf5da2adb5ae39cafe0385b",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Markdown",
"length_bytes": 42358,
"license_type": "no_license",
"max_line_length": 273,
"num_lines": 1252,
"path": "/pandoc/manual_old.md",
"repo_name": "orix-software/orix-software.github.io",
"src_encoding": "UTF-8",
"text": "# Orix/Twilighte Manual\n\n{width=\"17cm\"\nheight=\"12.749cm\"}\n\nv2022.4\n\n(rev 20/07/2022)\n\n\\newpage\n\n## EXTRA DOCUMENTATION\n\n===================\n\nRom loader and firmware twilighte board menu\n--------------------------------------------\n\nhttps://raw.githubusercontent.com/orix-software/systemd/master/doc/pdf/systemd.pdf\n\n[]{#anchor}INTRODUCTION\n=======================\n\n[]{#anchor-1}What is new (v2022.3) ? \n-------------------------------------\n\n[]{#anchor-2}Firmware menu (Systemd Rom)\n----------------------------------------\n\n- Displays ROM\n\n[]{#anchor-3}Loader Menu (Systemd Rom)\n--------------------------------------\n\n- Launched atmos tape file included osid, and some demo running on\n</p> microdisc only (Ripped)\n\n[]{#anchor-4}General informations\n---------------------------------\n\nThis documentation must be use when you have installed orix version\n**2021.4** (see at the top of the banner on the oric at boot).\n\nOn [http://orix.oric.org](http://orix.oric.org/), you will have some\nyoutube videos links showiint how to use some functionnality.\n\nThe board has a firmware version. This firmware can be upgarded see\n« Hardware and firmware upgrade » section.\n\nThe board can be upgarded too but you have to send it to upgrade the\nboard see « Hardware and firmware upgrade » section » too.\n\nThe card has a 512KB of eeprom, and 512KB of RAM. This RAM is saved with\na battery. For instance, only bank 4, 3, 2 and 1 can be switched to see\nothers sets. It's a software limit. In the future, you will be able to\ndisplays all bank and starts any binary from theses banks. If you wants\nto change the set, you can use twil command. This command can switch to\neeprom bank or ram bank and can switch to any set.\n\nSome extra devices (TOM2, logitech joypad) are explained a bit in this\nmanual, but it's not adverts, we don't ear anything:) It explains some\nways to use joystick, others hardware exists in the same way)\n\n[]{#anchor-5}Features\n---------------------\n\n- .tap file fast loading (with multitap files)\n\n<!-- -->\n\n- - Joysticks support for a lot of games on atmos mode\n - the hobbit, defence-force (and others games) works without any\n patch for loading\n\n- in system : kernel update, roms and ram update (with\n [orixcfg](http://orix.oric.org/command-orixcfg/) binary)\n- 2 DB9 Joysticks (atari)\n- 512KB of EEPROM (banking mode)\n- 512KB of RAM (banking mode)\n- read/write from sdcard (MAX 64GB) or usb drive (mass storage)\n- drag and drop from the PC to the oric : It will be available on the\n oric (with WIFI connexion) : It requires extra hardware with a\n raspberry pi zero\n- fast loading : 46KB per second. A game require less than one second\n to load and start\n- cumulus compatible with the help of an other amplibus board (not\n provided)\n\n[]{#anchor-6}GETTING STARTED\n============================\n\n[]{#anchor-7}Content\n--------------------\n\n{width=\"6.08cm\" height=\"8.107cm\"}{width=\"10.509cm\" height=\"7.881cm\"}\n=================================================================================================================================================================================================================================================================================\n\n[]{#anchor-8}Physicals ports\n----------------------------\n\n{width=\"8.326cm\"\nheight=\"4.598cm\"}{width=\"9.181cm\"\nheight=\"4.262cm\"}\n\n[]{#anchor-9}Hardware limits\n----------------------------\n\nThe usb controler manage FAT32 only. Sdcard and usb key must be\nformatted with FAT32 filesystem. If you want to use pi zero gadget\ntrick, you need to do a mkfs to FAT32 file system.\n\nAll tests had been done with samsung evo sdcard and sandisk usb key. A\nlot of sdcard works, and we did not see incompatibility with sdcard.\n\nSdcard controler and usb key controler can work with 32GB storage Max.\nBut it can handle 64 GB sdcard (tested). It can handle larger sdcard/usb\nkey reader, but only 32 and 64 GB devices was used.\n\n[]{#anchor-10}Software limits\n-----------------------------\n\nThe sdcard/usb controler can handle long filename, but Orix handles 8+3\nfilename only.\n\n[]{#anchor-11}Information about joysticks part\n----------------------------------------------\n\nThe left port has only 1 button. The right port has 3 buttons. The\njoystick pinout is atari pinout. You can use standard DB9 joystick. You\ncan also plug « TOM2 » hardware (not provided), it can connect a usb\nmouse or usb joypad (wireless) to theses ports. For example, logitech\njoypad F710 (wireless) works with TOM2.\n\nPlease note that TOM2 can only handle 2 buttons. It means that the third\nbutton can't work with TOM2 connected.\n\n{width=\"17cm\" height=\"3.902cm\"}\n--------------------------------------------------------------------------------------------\n\n{width=\"10.659cm\" height=\"7.712cm\"}[]{#anchor-12}First boot : Initialize the storage\n-------------------------------------------------------------------------------------------------------------------------------------------------\n\nWhen the card is sent, kernel is built with a default storage. In order\nto know which device is the default one, you can type « mount ». You can\nonly use one device at the same time, but you can swap easily theses\ndevices from command line.\n\nIf you see « sdcard », then sdcard will be read by default. You can\nchange it, with a command : « twil -u », it will switch to usbdrive. If\nyou want to have usb drive by default, you can program kernel with the\ntool « orixcfg ». See Orixcfg section.\n\nNow, if you know which device you will use by default, you can install\nall software on it.\n\nPlug the device on your PC (sdcard or usb key). If you have a pi zero w,\nyou can do this with drag and drop solution from the PC.\n\nDownload sdcard.tgz from this :\n<http://repo.orix.oric.org/dists/official/tgz/6502/>\n\nIt contains all software for orix there is others which are not\navailable in this archive.\n\nNow, use 7zip on your PC (or tar/gzip under linux), and unzip all files\nfrom this sdcard.tgz. Put all theses new files in your device root\nfolder.\n\nNow, you can insert the device (sdcard or usbkey -- or pi zero) in the\ntwilighte board and play.\n\n[]{#anchor-13}Upgrade from v2022.1 to v2022.3\n---------------------------------------------\n\nIf your orix version is below v2022.1 version, please go to annexes part at the\nend of this document, before you try to upgrade to v2022.3\n\n- Download\n <http://repo.orix.oric.org/dists/official/tgz/6502/sdcard.tgz>\n- untar/gunzip sdcard.tgz (use 7zip under windows) on your device usb\n or sdcard : It could require some time to copy because there is a\n lot of small files (tap, hlp etc)\n- you can start orix on real machine, and type :\n\n /\\#cd usr\\\n /usr\\#cd share\\\n /*usr/share\\#cd carts\\\n /usr/share/carts\\#cd 2022.3*\n\n If you want to usr usb drive for default device :\n\n */usr/share/carts/2022.3\\#orixcfg -r -s 4 kernelus.r64*\n\n If you want to use sdcard for default device :\n\n /usr/share/carts/2022.3\\#orixcfg -r -s 4 kernelsd.r64\n\n- press 'y', and **wait until Orix reboots **\n\n (Don't switch off the Oric at this step)\n\n\n[]{#anchor-14}Optionnal step for upgrade\n----------------------------------------\n\nNow bank displays all banks from l to 64. It means that you should have\nsome strange bank signature for eeprom. Now an empty set is provided in\n*/usr/share/carts/2021.3 *folder. With Orixcfg you can initialize your\nset with this cart. Don't use « -s 4 » flag for orixcfg when you want to\nload emptyset.\n\n[]{#anchor-15}First step : type a command\n-----------------------------------------\n\nYou can access to available command from many ways :\n\n- From /bin folders, there is binary available on current device, 'ls'\n will show you available commands\n- From banks : type « help -b5 » you will see available commands\n\n\n\n[]{#anchor-16}BANK\n==================\n\n[]{#anchor-17}Usage \n--------------------\n\nBank command is command line tool to see which bank are loaded into\nEEPROM bank and RAM bank. Each bank has a « signature ». Bank allows to\nsee theses banks.\n\nBank can also starts a ROM with his id. In that case, you don't need to\nhave a rom « orix friendly » and you can start it from command line. In\nthe current bank version, there is restriction to launch a command.\n\n[]{#anchor-18}List all bank (when ROM signature is valid)\n---------------------------------------------------------\n\n/\\#bank\n\nBank 1 to 32 is eeprom bank and bank 33 to 64 are ram bank\n\n[]{#anchor-19}Displays all signature even when ROM is not valid\n---------------------------------------------------------------\n\n/\\#bank\n\n[]{#anchor-20}List all commands from a bank \n--------------------------------------------\n\n/\\#help -b5\n\n[]{#anchor-21}Start a specific bank\n-----------------------------------\n\n/\\#bank 1\n\nIf you need to load a rom into a bank, you need to have a look to\norixcfg binary.\n\n[]{#anchor-22}SHELL\n===================\n\n[]{#anchor-23}Flush the current command line \n---------------------------------------------\n\nCtrl+c\n\n[]{#anchor-24}Available commands\n--------------------------------\n\nYou can see available commands with this command :\n\n/\\#help -b5\n\nThe command line is limited in characters. If you reach this limit, you\nwon't be able to type the complete command line\n\n[]{#anchor-25}Known bugs\n------------------------\n\n1) If you return to a line when your command line is bigger than 40\n columns, if you try to del a chars at the previous line, you can't.\n2) « ./ » can not be used to launch a binary\n\n[]{#anchor-26}BASIC11\n=====================\n\nLaunch\n------\n\nYou can type basic11 or press FUNCT+B to start\n\n[]{#anchor-27}Load a personal .tap file\n---------------------------------------\n\nWhen you starts basic11 commands, the default path is\n*« /home/basic11/ ». Each action on the basic11 mode will be done in\nthis folder (cload/csave). If you cload a tape file, it must be in\n« /home/basic11 » folder.*\n\n{width=\"7.544cm\"\nheight=\"5.447cm\"}You have downloaded a .tap file, and want to use it.\nThen, you can create a folder /*home*/basic11/\n\nUnder Orix\n\n/\\#mkdir home\\\n/\\#cd home\\\n/home\\#mkdir basic11\\\n/home\\#cd basic11\n\nPut you file in this folder from your PC, and start basic11 (you don't\nneed to be in the «/home/basic11 » folder to start basic11 with no\nparameter. By default, basic11 starts in « /home/basic11/ »\n\n[]{#anchor-28}Oric.org tape file\n--------------------------------\n\nWhen you downloaded sdcard.tgz and unzip it into sdcard or usbkey\ndevice, there is many tape file included in this archive. You don't need\nto move these type file, if you know the key, you can starts it from\ncommands line. In this case, it will load the correct basic1.1 rom to\nstart the tape file (see below), and the correct joystick configuration\nif it's correct.\n\n[]{#anchor-29}Oric.org tape file update\n---------------------------------------\n\nEach week a new software.tgz is generated. You can download it from\n« repo » and unzip it on the device. It will generate last tape file and\nlast joysticks configuration.\n\n[]{#anchor-30}Search a tape file from command line\n--------------------------------------------------\n\n{width=\"7.304cm\"\nheight=\"5.398cm\"}[]{#anchor-31}Basic11 has also many.tap files inserted\nin sdcard.tgz\n\nTry to find the software with option -l\n\n/\\# basic11 -l\n\nIf you find your software, you can do perform **ctrl+c.**\n\nYou can type space to do a pause.\n\nOn that case, you can launch the tape file like :\n\n/\\# basic11 «KEYDISPLAYED\n\nWhen KEYDISPLAYED is the key displayed in key column. Please note that\nthe key must be in **UPPERCASE**\n\n[]{#anchor-32}Load a tap file from command line\n-----------------------------------------------\n\nNote that MYFILE must be in **UPPERCASE**\n\n/\\# basic11 «MYFILE\n\nIf MYFILE is in the oric.org database, it will launch the software with\nthe filename MYFILE.\n\nIf basic11 command does not find MYFILE in the oric.org database, it\nwill try to load it from /home/basic11/ folder.\n\n[]{#anchor-33}Save your program \n--------------------------------\n\n\\\nIf you start « basic11 » with no options, basic rom will starts and each\ncsave (or cload) actions will store files in « /*home/basic11 » folder*\n\n[]{#anchor-34}Start basic11 menu\n--------------------------------\n\n{width=\"7.384cm\"\nheight=\"5.341cm\"}If you type « basic11 -g » on command line or FUNCT+G,\nyou will have a menu with all software which have a download link on\noric.org (only atmos version and when a tape file is available).\n\n/\\#basic11 -g\n\nYou can use left and right letters to change to a new letter. If the\nletter is empty, it means that there is no available tap file for this\nletter.\n\nYou can use up and down link to navigate into software. If you press\nenter, the software will starts.\n\nNote that not all games are working yet. Some times, chars are\ncorrupted. If the joysticks does not works, there is two case :\n\n- the game does not call rom routine to manage keyboard\n- keyboard mapping is not done yet\n\nYou can use arrows to navigate into the menu :\n\n- up and down to select the software\n- right and left to switch to the menu letters\n\nSome letters are empty. It means that there is no software with tape\nfile available on oric.org for this letter\n\n[]{#anchor-35}Quit basic11\n--------------------------\n\nIf you want to quit basic11 from interpreter command line, you can type\n« QUIT ». This will force to reboot to Orix (you can also use reset\nbutton)\n\n[]{#anchor-36}How the .tap file starts\n--------------------------------------\n\nIf you only type « basic11 », this will start bank 6 (normal basic rom).\nThe default folder in that case is «/*home/basic11 »*\n\nIf you type « basic11 » with a tape file as an argument, there is 2\ncases\n\n1. The tape file (key) is already known in oric.org website, then\n basic11 try to find it in its databank file (/var/cache/basic11/\n folder). If the key is found, it will start the tape file located in\n «/usr/share/basic11/\\... »\n2. If the key is unknown, it will try to find it in «/home/basic11 »\n\nIf the tap file is in the oric.org db file, basic11 will load the\nsoftware configuration from the db software file (as joystick\nconfiguration, and the id of the rom). Basic11 load the right rom into\nram bank, override the default basic11 path to the tape file folder\n(« *usr/share/basic11/\\[firstletter software\\]. *\n\nIt means that if you load this kind of software and you can quit the\nsoftware, each file action in basic11 rom, will be performed in\n« usr/share/basic11/\\[firstletter software\\]. »\n\n[]{#anchor-37}Not working tapes (for instance)\n----------------------------------------------\n\n- All Oric-1 games can be started with FUNCT+L in ROM menu : start\n oric-1 (depending of your device), and put .tap files in\n /home/basic10\n- Software which does not work (25), but the number can be reduced in\n future release.\n\n ----------------------------- --------------------- ------------------------------------------------------------\n cobra Cobra pinball Damsel in distress\n Rush hour 4K \n Le diamant de l'ile maudite Durendal HU\\*BERT\n Hunchback Schtroumpfs Stanley (ROM 0,1 tested)\n Them Titan Visif\n Xenon III Dig Dog Elektro Storm\n Kilburn Encounter Le tresor du pirate L'aigle d'or (ROM 0,1 tested)\n Compatible (micropuce) Volcanic demo Clavidact\n DAO Cobra Soft CW-Morse The Hellion\n MARC Caspak Kryllis : when we lost one life, the game does not restart\n ----------------------------- --------------------- ------------------------------------------------------------\n\n[]{#anchor-38}Tape with altered charset\n---------------------------------------\n\n ------------ ------------ -------------------------\n Fire flash Scuba Dive 3D fongus (i,f letters)\n \n ------------ ------------ -------------------------\n\n[]{#anchor-39}Joysticks issues\n------------------------------\n\nWe did keyboard/joystick mapping for a lot of games, but we did not set\nthe keyboard mapping for all software. If you want to help us, contact\nus.\n\nSome game does not work because they handle their own keyboard routine.\nIt could be handle with hardware tricks but, it's not done.\n\nSome others games uses special keys (SHIFT, CTRL) for direction or the\nfirst button. Theses cases are not handle yet : but it could in the\nfuture.\n\n[]{#anchor-40}Software update changelog\n---------------------------------------\n\nYou need to download software.tgz in the official repo for :\n\n- Mr wimpy added to the archive (takes effect 20/01/2021)\n- Airfox added to the archive (takes effect 20/01/2021)\n- Atlantid added to the archive (takes effect 20/01/2021)\n- Centrale nucléaire added to the archive (takes effect 20/01/2021)\n- Cobra invaders added to the archive (takes effect 20/01/2021)\n- Coctail Recipies added to the archive (takes effect 20/01/2021)\n- Crusher added to the archive (takes effect 20/01/2021)\n- Death Driver added to the archive (takes effect 20/01/2021)\n- Challenge voile added to the archive (takes effect 20/01/2021)\n- Breakout 1K added to the archive (takes effect 20/01/2021)\n- DAO added to the archive (takes effect 20/01/2021)\n- echecs asn\n- dialogue\n\n« added to the archive » means that you need to download softwares.tgz\nfrom the repo after the displayed date to get it in the archive\n\n[]{#anchor-41}BOOTFD\n====================\n\n[]{#anchor-42}Introduction\n--------------------------\n\nbootfd is a tool to boot the boot sector when a drive is connected.\nInsert a disk and type :\n\n/\\#bootfd\n\nThe binary is located to bin folder. It will load microdisc rom and\nstart it. If the binary displays that it does not found microdis.rom,\nthen place microdis.rom file in the right folder.\n\nIf you have firmware 1, you will be able to load « blake's 7 ». If you\nhave firmware 2, all sedoric .dsk should start.\n\nFor instance, only Space99 does not work, it stops after intro.\n\n[]{#anchor-43}CKSUM\n===================\n\n{width=\"8.334cm\" height=\"6.02cm\"}[]{#anchor-44}Usage\n-----------------------------------------------------------------------------------------------------------------\n\nCksum is a tool to compute a checksum of a file and displays its size.\n\n[]{#anchor-45}DF\n================\n\n[]{#anchor-46}Usage\n-------------------\n\nIt displays available blocks on current device\n\n{width=\"17cm\"\nheight=\"12.293cm\"}\n\n[]{#anchor-47}DSK-UTIL\n======================\n\n[]{#anchor-48}Introduction\n--------------------------\n\nThis tool is useful to extract files from dsk file. You can extract it,\nand uses command line tool to use it. For example, if you extract a\nbasic program (.bas in FTDOS .dsk file), you can see it with « list »\nbinary. If it's a .hrs/.hir file, you can read it with viewhrs file.\n\nYou can create a «/*home/sedoric/ » *folder and adds .dsk sedoric files\nin this folder\n\nSome .dsk files are imported in sdcard.tgz. For sedoric, you can have a\nlook to «/usr/share/sedoric/ » and for ftdos : « */usr/share/ftdos »*\n\n[]{#anchor-49}List files from .dsk (sedoric)\n--------------------------------------------\n\n/home/sedoric\\# dsk-util -s ls sed.dsk\n\n[]{#anchor-50}Extract a file from sedoric .dsk file\n---------------------------------------------------\n\n/home/sedoric\\# dsk-util -s e sed.dsk myfile.hrs\n\n[]{#anchor-51}Extract only .hrs files from sedoric .dsk file\n------------------------------------------------------------\n\n/home/sedoric\\# dsk-util -s e sed.dsk \\*.hrs\n\n[]{#anchor-52}FORTH\n===================\n\n[]{#anchor-53}Use forth\n-----------------------\n\nYou can use forth language. It works the same ways than Tele forth (it's\nteleforth but it write file on sdcard/usbdrive).\n\nYou can download Teleforth langage in order to see how to program in\nforth.\n\nWhen you type « forth » forth starts with the current folder directory.\n\nIf you were in /, forth will load and save file in this folder.\n\nIn others cases, you can create a forth folder in home and goes into it\nin forth for example :\n\n/\\#mkdir home\\\n/\\#cd home\\\n/\\#mkdir forth\\\n/\\#forth\n\nif you type « cd forth» in forth environnement, all files actions will\nbe perform in « /*home/*forth »\n\n[]{#anchor-54}HEXDUMP\n=====================\n\n[]{#anchor-55}Introduction\n--------------------------\n\n{width=\"7.685cm\"\nheight=\"5.595cm\"}Can dump in hexadecimal and ascii the content of a\nfile. Ctrl+c and space are working\n\n[]{#anchor-56}IOPORTS\n=====================\n\n[]{#anchor-57}Introduction\n--------------------------\n\nDisplays I/O address of the board\n\n[]{#anchor-58}LS\n================\n\n[]{#anchor-59}Introduction\n--------------------------\n\n« ls » list all the file in the current folder. Token are supported\n(\\*,?) ex : « ls ?e.tap » will list all files with a 'e ' in the second\nletter\n\n[]{#anchor-60}list all the files in the current folder\n------------------------------------------------------\n\n/\\#ls\n\n[]{#anchor-61}List all \\*.tap files\n-----------------------------------\n\n/\\#ls \\*.tap\n\n[]{#anchor-62}List size and datetime of the file\n------------------------------------------------\n\n/\\#ls -l\n\n[]{#anchor-63}LSCPU\n===================\n\n[]{#anchor-64}Introduction\n--------------------------\n\nDisplays cpu info. It detects : 6502, 65c02 and 65816\n\n[]{#anchor-65}MONITOR\n=====================\n\n[]{#anchor-66}Usage\n-------------------\n\nMonitor is a rom which can displays a monitor. It's teleass without\nassembler part.\n\n[]{#anchor-67}ORIXCFG\n=====================\n\n[]{#anchor-68}Update kernel, shell : orixcfg\n--------------------------------------------\n\nWhen a new released is done, you can update the eeprom with the new\nkernel and new roms.\n\nIf you want to know if you need to update the kernel, you can compare\nyour current version, and the last release version. You can go to\n[http://orix.oric.org](http://orix.oric.org/) You need to have a look to\nthis release below :\n\n{width=\"17cm\"\nheight=\"9.888cm\"}\n\nIf on your Oric screen, it's not the same value, you can update it. The\nsequence of the Orix release is Year.X. There is 4 releases a year, and\neach one must be done until you reach the final one, in order to avoid\nsome case. If your version is v2020.3 and the last available version is\nv2021.4. You need to update to v2020.4, then v2021.1, v2021.2, v2021.3,\nv2021.4.\n\nIt's maybe possible to jump to version v2021.4, but it's at your own\nrisk because there is a « chance » that some kernel calls changed, and\norixcfg could do wrong step.\n\n[]{#anchor-69}Update kernel, shell\n----------------------------------\n\nWhen you need to update kernel, you can update it with orixcfg. You just\nneed to define set 4 on the command line. This step is very\n**dangerous** if you don't load the right file. There is no verification\nand any file on the command line will be load in the kernel set.\nUsually, kernel set file is named kernxxxx.r64.\n\nIf you did something wrong on this step, you won't be able to start orix\nagain. It means that you will need to remove eeprom from the card and\nprogram it with a eeprom programmer\n\nThis next command will load kernel.r64 to kernel set. Please wait until\nOrix reboots. If you have a kernel 2020.3 and you need to load a kernel\n2021,1, you will need to load previous kernel set before the update of\n2021.1.\n\n.r64 extension means that it's a 64KB set. It's usually used to define\nthat the file contains 4 roms of 16KB.\n\nPlease note that we provide 2 kernels version. One named\n« kernelsd.r64 » which means that the default device will be sdcard, and\nthe other one « kernelus.r64 » which means that default device will be\n« usb » (usbkey). If you load the wrong kernel at this step, you can use\ntwil command to switch to the right device, and you can start again\nkernel update with the right file (kernelsd.r64 or kernelus.r64\ndepending of your configuration).\n\n/\\#orixcfg -r -s 4 kernelsd.r64\n\n[]{#anchor-70}Load a ROM into a ram slot\n----------------------------------------\n\nSpace between values and switches are not optionnal, orixcfg needs\ntheses spaces\n\n/\\#orixcfg -b XX -l myrom.rom\n\nThis command will load myrom.rom (in the current path), in RAM bank XX\n\nOlder usage as : orixcfg -r -s X -b Y myrom.rom is no longer included in\norixcfg since orixcfg v2021.3\n\n[]{#anchor-71}Load a set of ROM into ROM slot\n---------------------------------------------\n\n/\\#orixcfg -r -s 0 myrom.r64\n\n[]{#anchor-72}This command will load myrom.r64 (in the current path), in\nset 0. For instance, you can not load one bank, you need to load 64KB\nset.\n\n[]{#anchor-73}Clear bank ram or initialize it\n---------------------------------------------\n\nRam bank are not initialized when the board is tested. If you have\ngarbage on screen when you uses bank (after you used twil -w). You have\nto clear all ram bank (ram bank are battery saved).\n\nIf you want to clear bank 4 of the set 0, you can do this command. You\nneed to do this command for each bank of each set. For instance, there\nis no switch to clear all the ram with one command.\n\n/\\#orixcfg -w -s 0 -b 4 -c\n\n[]{#anchor-74}Flush all ram bank\n--------------------------------\n\n/\\#orixcfg -w -f\n\n[]{#anchor-75}OSID MUSIC\n========================\n\n[]{#anchor-76}How to play osid music ?\n--------------------------------------\n\nYou need to check if you have twilighte board firmware 2 :\n\n/\\#twil -f\n\nIf it returns 2 or greater, you can download some osid files :\n\n<https://www.oric.org/software/osid_music-2534.html>\n\nPlace all .tap files in /home/basic11\n\nAnd launch :\n\n/\\#basic11\n\nLoad patch to avoid to load sedoric routines (in basic command line)\n\nCLOAD«OSID\n\nAnd then load the osid file you want :\n\nCLOAD«OSNEVER\n\n[]{#anchor-77}PWD\n=================\n\n[]{#anchor-78}Introduction\n--------------------------\n\nDisplays current PWD\n\n[]{#anchor-79}SHA1\n==================\n\n[]{#anchor-80}Usage\n-------------------\n\nSha1 is a tool to displays a string into sha1 encoding\n\n[]{#anchor-81}STORMLORD\n=======================\n\n[]{#anchor-82}Introduction\n--------------------------\n\nStormlord is Stormlord game port to Orix. You can use joysticks to plays\nto this game.\n\n[]{#anchor-83}Only one joystick port is working on this version\n===============================================================\n\n[]{#anchor-84}SYSTEMD\n=====================\n\nSystemd is a rom which can load another ROM in ram slot. When you type\nsystemd, it will reads * /etc/systemd/banks and will load rom declared\nin this file sequencialy. It means that the first rom will be load in\nbank id 33, the second one in bank id 34.*\n\nThis roms can be used in a eeprom bank, you can load it with orixcfg\n\nYou can set roms in *« /etc/systemd/banks » as : *\n\n*\\[MYROMNAME1\\]*\n\n*path=/usr/share/rom/my.rom*\n\n*\\[MYROMNAME2\\]*\n\npath=/usr/share/rom/my2.rom\n\n[]{#anchor-85}TWIL\n==================\n\n[]{#anchor-86}Introduction\n--------------------------\n\nTwil command can displays the current firmware of twilighte card, and\ncan swap root folder to usbkey or sdcard.\n\nTwil command can also swap bank 4, 3, 2 and to eeprom set or ram set.\n\n[]{#anchor-87}Displays Twilighte board firmware\n-----------------------------------------------\n\n/\\#twil -f\n\n[]{#anchor-88}Swap to sdcard for root file system\n-------------------------------------------------\n\n/\\#twil -d\n\n[]{#anchor-89}Swap to usb key for root file system\n--------------------------------------------------\n\n/\\#twil -u\n\n[]{#anchor-90}PI ZERO connection for DRAG AND DROP\n==================================================\n\n[]{#anchor-91}Drag and drop or file copy to the pi zero\n-------------------------------------------------------\n\nIf you have a pi zero, you can use it to simulate a usb key with wifi\nconnectivity for drag and drop.\n\nYou need to follow this documentation :\n<http://orix.oric.org/drag-and-drop-files-from-pc-to-the-card/>\n\nPlease note that you must connect pi to usb twilighte port on his usb\nport (not the psu port). Also note that pi consumes power and if you use\na psu lower than 2 A, you should have some hardware errors\n\n{width=\"17cm\"\nheight=\"11.769cm\"}Some usb cable are only for charge, it means that it\nwon't work with the card and pi zero, because it missed data wires\n\n[]{#anchor-92}CUMULUS COMPATIBILITY\n===================================\n\n[]{#anchor-93}How to connect a cumulus \n---------------------------------------\n\nOn the current firmware (Firmware 1) : and current hardware (board\nversion v0.65), we have to do some hacks to have cumulus working. But,\nyou will only launch two diskfile. Anyway, you can access to drive with\nno restriction, except bank switching. See « Hardware and firmware\nupgrade », if you want to avoid theses modifications\n\nIn firmware 1, and with board modification, there is only two working\ndisk : Blake's 7 and VIP2015.\n\nPlease, test your cumulus on the oric connected to the board. If it does\nnot work on your Oric, it won't work too with card plugged !\n\nIf you want to use cumulus, you have to :\n\n[]{#anchor-94}1) cut 4 pins on daughter card (ROMDIS, MAP, A14, A15)\n\n{width=\"11.137cm\"\nheight=\"14.185cm\"}\n\n[]{#anchor-95}2) remove eprom from cumulus\n\n{width=\"15.722cm\"\nheight=\"20.657cm\"}\n\n[]{#anchor-96}3) add another amplibus before twilighte daughter board\n\n{width=\"17cm\"\nheight=\"15.452cm\"}\n\n[]{#anchor-97}4) Connect all cards to the oric\n\n{width=\"16.346cm\" height=\"18.554cm\"}\n-------------------------------------------------------------------------------------------------\n\n[]{#anchor-98}Twilighte board firmware compatibility\n----------------------------------------------------\n\nOnly firmware 2 is available to use boot sector to start Microdisc disk.\n\n[]{#anchor-99}Hardware and firmware upgrade\n===========================================\n\n[]{#anchor-100}Firmware upgrade\n-------------------------------\n\nThere is only one firmware available. The version 2 is in development.\n\n[]{#anchor-101}First method : For those who have programmers and some hardware tool\n-----------------------------------------------------------------------------------\n\nBut, when it will be released, you could update the firmware with :\n\n1\\) a plcc extractor\n\n2\\) altera software (Quartys v13)\n\n3\\) a Jtag programmer\n\n4\\) solder the jtag connector\n\n5\\) get .pof file\n\n[]{#anchor-102}Second method : send the card to the author of the card (me)\n---------------------------------------------------------------------------\n\nIn that case, fimware upgrade will be done, and you could ask to upgrade\nto new board version to add (sometimes new functionnality)\n\n[]{#anchor-103}TROUBLE SHOOTING\n===============================\n\n[]{#anchor-104}'ls' displays garbage on screen\n----------------------------------------------\n\nInsert your sdcard or your usb drive into your PC. You should have\nstrange « file » on the sdcard : remove theses files.\n\n[]{#anchor-105}Impossible to mount a usb key or a sdcard\n--------------------------------------------------------\n\nThe sdcard must be in FAT32 format\n\n[]{#anchor-106}Screen garbage when i use bank\n---------------------------------------------\n\nIf you have screen garbage when you switched to ram bank before with\n« twil -w »\n\nIt means that ram bank are not initialized. See orixcfg section to fix\nit\n\n[]{#anchor-107}Pi zero always reboots\n-------------------------------------\n\nCheck your PSU. If you have a 2A PSU and you have a pi zero, cumulus and\nTOM2 connected, you should reach the PSU limits. If you can't get\nanother PSU, you can disable bluetooth of you pi zero, or you can also\ndownclock from 1Ghz to 700mhz for example.\n\nYou can also use a 3A PSU. In the future, it will be possible to add\nanother PSU on the board.\n\n[]{#anchor-108}When i start Orix, filesytem is unstable or displays usb controler not found\n-------------------------------------------------------------------------------------------\n\nIf you have pi zero connected, it could answer to the controler partial\ninformation or could hang the usb controler because controler does not\nunderstand usb data when it sends information to usb port.\n\nYou have to wait a bit. If you want to verify this, you can switch off\nthe oric (and then the pi zero), switch on the oric with Orix, and type\n'debug', if you have another value than \\#AA for ch376 check exists,\nit's the problem, if you do 'debug' another value will be displayed but\nnot \\#AA. In fact, when pi zero boot, usb controler is unstable.\n\n[]{#anchor-109}« I/O Error » is displayed\n-----------------------------------------\n\nYou can reach this message in some case :\n\n1. device (sdcard or usbdrive is missing)\n2. after a launch of « df » command : There is an issue, the controler\n is in incorrect state after this command. It's a bug\n\nYou can usually fix it by launching « ls » twice. Because « ls » handles\na reset order to the usb controler when it does not produce the correct\nanswer. It means that if USB controler is not working well, ls displays\nthe error message and will produce a reset command to the controler. If\nyou launch ls again, it will work.\n\n[]{#anchor-110}The oric does not work : black screen\n----------------------------------------------------\n\nIf you have a pi zero connected on usb port, unplug it. Boot the oric,\nand now insert pi zero into usb port\n\n[]{#anchor-111}Kernel panic\n---------------------------\n\nWhen kernel can't solve a « free memory kernel call» in a binary, it\ncould produce a kernel panic. In that case, you need to do a reset.\nThere is a bug in kernel 2021.1 which could produce this error. It will\nbe corrected as soon as possible.\n\n[]{#anchor-112}A folder is displayed on my PC but not under my Oric\n-------------------------------------------------------------------\n\nSometimes sdcard or usbkey has bad format for the usb controler and it\ncan reads some content. Format the usb key or sdcard and install again\nall files. Or try another usb key/sdcard\n\n[]{#anchor-113}I have strange behavior when un do csave or cload on basic ROM : It's always the same file event i cload another content\n---------------------------------------------------------------------------------------------------------------------------------------\n\nSometimes sdcard or usbkey has bad format for the usb controler and it\ncan reads some content. Format the usb key or sdcard and install again\nall files. Or try another usb key/sdcard\n\n[]{#anchor-114}Q&A\n==================\n\n[]{#anchor-115}I want to change the current directory\n-----------------------------------------------------\n\nSee « cd » command\n\n[]{#anchor-116}I want to see which bank are loaded into ROM and RAM\n-------------------------------------------------------------------\n\nSee «bank» section\n\n[]{#anchor-117}I want to read a .dsk file\n-----------------------------------------\n\nYou can only extract files from a .dsk file (see « dsk-util »)\n\nIf you have a cumulus board, you can use « bootfd » and connect your\ncumulus on expansion board « see how to connect a cumulus section »\n\n[]{#anchor-118}I can't type anything in basic rom (« basic11 » command)\n-----------------------------------------------------------------------\n\nThere is a firmware bug on some board which generate a false state for\nthe third button of a joystick. The easier step to avoid this, is to\nconnect a joystick to the left port on the board.\n\nThe issue can be fixed by upgrading firmware board (it needs to open the\nbox and program the firmware with Jtag port)\n\n[]{#anchor-119}ANNEXES\n======================\n\n[]{#anchor-120}Firmware version\n-------------------------------\n\n --------- ------------------------------------------------------------- ------------\n Version Features Known bugs\n 1 RAM/ROM switch, ROM programmation, joysticks, usb controler N/A\n 2 Start all sedoric disks from cumulus N/A\n --------- ------------------------------------------------------------- ------------\n\n[]{#anchor-121}Upgrade from older version\n-----------------------------------------\n\n\nUpgrade from v2021.4 to v2022.1\n---------------------------------------------\n\nIf your card is below v2021.4 version, please go to annexes part at the\nend of this document, before you try to upgrade to v2022.1\n\n- Download\n <http://repo.orix.oric.org/dists/official/tgz/6502/sdcard.tgz>\n- untar/gunzip sdcard.tgz (use 7zip under windows) on your device usb\n or sdcard : It could require some time to copy because there is a\n lot of small files (tap, hlp etc)\n- you can start orix on real machine, and type :\n\n /\\#cd usr\\\n /usr\\#cd share\\\n /*usr/share\\#cd carts\\\n /usr/share/carts\\#cd 2022.1*\n\n If you want to usr usb drive for default device :\n\n */usr/share/carts/2022.1\\#orixcfg -r -s 4 kernelus.r64*\n\n If you want to use sdcard for default device :\n\n /usr/share/carts/2022.1\\#orixcfg -r -s 4 kernelsd.r64\n\n- press 'y', and **wait until Orix reboots **\n\n (Don't switch off the Oric at this step)\n\nUpgrade from v2021.3 to v2021.4\n-------------------------------\n\nIf your card is below v2021.3 version, please go to annexes part at the\nend of this document, before you try to upgrade to v2021.4\n\n- Download\n <http://repo.orix.oric.org/dists/official/tgz/6502/sdcard.tgz>\n- untar/gunzip sdcard.tgz (use 7zip under windows) on your device usb\n or sdcard : It could require some time to copy because there is a\n lot of small files (tap, hlp etc)\n- you can start orix on real machine, and type :\n\n /\\#cd usr\\\n /usr\\#cd share\\\n /*usr/share\\#cd carts\\\n /usr/share/carts\\#cd 2021.4*\n\n If you want to usr usb drive for default device :\n\n */usr/share/carts/2021.4\\#orixcfg -r -s 4 kernelus.r64*\n\n If you want to use sdcard for default device :\n\n /usr/share/carts/2021.4\\#orixcfg -r -s 4 kernelsd.r64\n\n- press 'y', and **wait until Orix reboots **\n\n (Don't switch off the Oric at this step)\n\n[]{#anchor-122}Upgrade from v2021.2 to v2021.3\n----------------------------------------------\n\nYou need to unzip/untar orixcfg new version here :\n<http://repo.orix.oric.org/dists/2021.3/tgz/6502/orixcfg.tgz>\n\n- Download\n <http://repo.orix.oric.org/dists/official/tgz/6502/sdcard.tgz> or\n <http://repo.orix.oric.org/dists/2021.3/tgz/6502/cardridge.tgz>\n- untar/gunzip sdcard.tgz (use 7zip under windows) on your device usb\n or sdcard : It could require some time to copy because there is a\n lot of small files (tap, hlp etc)\n- you can start orix on real machine, and type :\n\n /\\#cd usr\\\n /usr\\#cd share\\\n /*usr/share\\#cd carts\\\n /usr/share/carts\\#cd 2021.3*\n\n If you want to usr usb drive for default device :\n\n */usr/share/carts/2021.3\\#orixcfg -r -s 4 kernelus.r64*\n\n If you want to use sdcard for default device :\n\n /usr/share/carts/2021.3\\#orixcfg -r -s 4 kernelsd.r64\n\n- press 'y', and **wait until Orix reboots **\n\n (Don't switch off the Oric at this step)\n\n[]{#anchor-123}Optionnal step for upgrade\n-----------------------------------------\n\nNow bank displays all banks from l to 64. It means that you should have\nsome strange bank signature for eeprom. Now an empty set is provided in\n*/usr/share/carts/2021.3 *folder. With Orixcfg you can initialize your\nset with this cart. Don't use « -s 4 » flag for orixcfg when you want to\nload emptyset.\n\n### []{#anchor-124}Upgrade from v2021.1 to v2021.2\n\nIf your card is below v2021.1 version, please go to annexes part at the\nend of this document, before you try to upgrade to v2021.2\n\n- Download\n <http://repo.orix.oric.org/dists/official/tgz/6502/sdcard.tgz>\n- untar/gunzip sdcard.tgz (use 7zip under windows) on your device usb\n or sdcard : It could require some time to copy because there is a\n lot of small files (tap, hlp etc)\n- you can start orix on real machine, and type :\n\n /\\#cd usr\\\n /usr\\#cd share\\\n /*usr/share\\#cd carts\\\n /usr/share/carts\\#cd 2021.2*\n\n If you want to usr usb drive for default device :\n\n */usr/share/carts/2021.2\\#orixcfg -r -s 4 kernelus.r64*\n\n If you want to use sdcard for default device :\n\n /usr/share/carts/2021.2\\#orixcfg -r -s 4 kernelsd.r64\n\n- press 'y', and **wait until Orix reboots **\n\n (Don't switch off the Oric at this step)\n\n### []{#anchor-125}From 2020.4 to 2021.1\n\nDownload : <http://repo.orix.oric.org/dists/2021.1/tgz/6502/carts.zip>\n\nUnzip it on your device (sdcard/usbkey)\n\n- you can start orix on real machine, and type :\n\n /\\#cd usr\\\n /usr\\#cd share\\\n /*usr/share\\#cd carts\\\n /usr/share/carts\\#cd 2021.1*\n\n If you want to usr usb drive for default device :\n\n */usr/share/carts/2021.1\\#orixcfg -r -s 4 kernelus.r64*\n\n If you want to use sdcard for default device :\n\n /usr/share/carts/2021.1\\#orixcfg -r -s 4 kernelsd.r64\n\n- press 'y', and **wait until Orix reboots **\n\n (Don't switch off the Oric at this step)\n"
},
{
"alpha_fraction": 0.6042031645774841,
"alphanum_fraction": 0.6287215352058411,
"avg_line_length": 14.852941513061523,
"blob_id": "95d7d868e0bf4abc76fa14548cdc41bfc3332319",
"content_id": "ab30b3d7eeff09c59abc39c7ba29abed65d1bd3f",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Markdown",
"length_bytes": 571,
"license_type": "no_license",
"max_line_length": 127,
"num_lines": 34,
"path": "/docs/samples/c_samples/getcwd.md",
"repo_name": "orix-software/orix-software.github.io",
"src_encoding": "UTF-8",
"text": "# Get CWD in cc65\r\n\r\nFor instance chdir, is not manage in cc65, then here is a workaround.\r\n\r\nCreate a file _ogetcwd.s. It must contains :\r\n\r\n```ca65\r\n.include \"telestrat.inc\"\r\n\r\nXGETCWD=$48\r\n\r\n.export _ogetcwd\r\n\r\n.importzp tmp1\r\n\r\n.proc _ogetcwd\r\n BRK_TELEMON XGETCWD\r\n sty tmp1\r\n ldx tmp1\r\n rts\r\n.endproc\r\n```\r\n\r\nAnd now, you C code, you can do (don't forget to add _ogetcwd.s to your command line to build _ogetcwd.s when you launch cl65):\r\n\r\n```c\r\nextern unsigned char ogetcwd();\r\n\r\n\r\nint main() {\r\n printf(\"Cwd : %s\",ogetcwd());\r\n return 0;\r\n}\r\n```"
},
{
"alpha_fraction": 0.5355450510978699,
"alphanum_fraction": 0.5581265687942505,
"avg_line_length": 18.494565963745117,
"blob_id": "b5d7e11dafb35be2ef1ca9fff8c1ee869a6d0e1c",
"content_id": "28c093163b307f63a58c6a39eeb20c3301b3b9cd",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Markdown",
"length_bytes": 3587,
"license_type": "no_license",
"max_line_length": 104,
"num_lines": 184,
"path": "/doxygen/doxybook_output/Files/vi__key__enter_8s.md",
"repo_name": "orix-software/orix-software.github.io",
"src_encoding": "UTF-8",
"text": "---\ntitle: /mnt/c/Users/plifp/OneDrive/oric/projets/orix-software/vi/src/functions/subfunc/vi/vi_key_enter.s\n\n---\n\n# /mnt/c/Users/plifp/OneDrive/oric/projets/orix-software/vi/src/functions/subfunc/vi/vi_key_enter.s\n\n\n\n## Functions\n\n| | Name |\n| -------------- | -------------- |\n| | **[vi_key_enter](Files/vi__key__enter_8s.md#function-vi-key-enter)**() |\n\n\n## Functions Documentation\n\n### function vi_key_enter\n\n```cpp\nvi_key_enter()\n```\n\n\n\n\n## Source code\n\n```cpp\n.proc vi_key_enter\n\n jsr vi_editor_switch_off_cursor\n\n lda #CR\n jsr vi_add_char_to_text\n\n lda #LF\n jsr vi_add_char_to_text\n\n ; Are we on the last\n ; if xpos==0 then goto @check_others_cases\n ldy #vi_struct_data::xpos_screen\n lda (vi_struct),y\n bne @check_others_cases\n\n ; if ypos==VI_LAST_LINE_EDITOR then goto @scroll_last_line\n ldy #vi_struct_data::ypos_screen\n lda (vi_struct),y\n cmp #VI_LAST_LINE_EDITOR\n beq @scroll_last_line\n tax\n scroll down,,26 ; Because the second empty arg is provided\n jsr vi_ypos_screen_plus_plus\n rts\n@scroll_last_line:\n scroll up,0,25 ; Because the second empty arg is provided\n rts\n\n@check_others_cases:\n ; if ypos < VI_LAST_LINE_EDITOR then goto @scroll_for_insert\n ldy #vi_struct_data::ypos_screen\n lda (vi_struct),y\n cmp #VI_LAST_LINE_EDITOR\n bne @scroll_for_insert\n\n ; else scrollup(0,26);\n scroll up,0,26\n\n jmp @continue\n\n@scroll_for_insert:\n\n sta vi_tmp1\n ; if posx=0 then goto @we_are_at_pos_0 (scroll)\n ldy #vi_struct_data::xpos_screen\n lda (vi_struct),y\n bne @we_are_at_pos_0\n\n ; first_line_to_scroll++;\n inc vi_tmp1\n\n ; if (ypos>26) then goto @last_line_case_scroll\n jsr vi_ypos_screen_plus_plus\n cmp #$00\n bne @last_line_case_scroll\n jsr vi_set_xpos_0\n rts\n@we_are_at_pos_0:\n ; scrolldown(first_line_to_scroll,26)\n ldx vi_tmp1\n inx\n scroll down,,26 ; Because the second empty arg is provided\n ; Now copy the current line\n jsr vi_ypos_screen_plus_plus\n jsr vi_compute_video_adress\n\n lda vi_ptr_file_used\n sta vi_ptr1\n\n lda vi_ptr_file_used+1\n sta vi_ptr1+1\n\n@L1:\n ldy #$00\n lda (vi_ptr1),y\n cmp #CR\n beq @exit\n sta (vi_ptr_screen),y\n\n sty vi_tmp2\n\n inc vi_ptr_screen ; 98\n bne @out_compare\n inc vi_ptr_screen+1\n@out_compare:\n\n\n inc vi_ptr1 ; 98\n bne @out_compare2\n inc vi_ptr1+1\n@out_compare2:\n\n\n ldy #vi_struct_data::ptr_last_char_file\n lda (vi_struct),y\n sta vi_tmp1\n\n lda vi_ptr1\n cmp vi_tmp1\n bne @not_eof\n\n ldy #vi_struct_data::ptr_last_char_file+1\n lda (vi_struct),y\n sta vi_tmp1\n\n lda vi_ptr1+1\n cmp vi_tmp1\n bne @not_eof\n beq @eof\n\n@not_eof:\n\n jmp @L1\n\n@eof:\n@exit:\n ; Now we erase the char on the previous line\n\n jsr vi_ypos_screen_sub_sub\n\n jsr vi_compute_video_adress\n\n ldy #vi_struct_data::xpos_screen\n lda (vi_struct),y\n tay\n\n lda #$00\n@L2:\n sta (vi_ptr_screen),y\n iny\n cpy #VI_EDITOR_MAX_COLUMN\n bne @L2\n\n jsr vi_ypos_screen_plus_plus\n\n jsr vi_set_xpos_0\n rts\n\n@continue:\n jsr vi_set_xpos_0\n jsr vi_ypos_screen_plus_plus\n rts\n@last_line_case_scroll:\n ;FIXME\n rts\n\n.endproc\n```\n\n\n-------------------------------\n\nUpdated on 2022-12-15 at 11:48:27 +0100\n"
},
{
"alpha_fraction": 0.5004068613052368,
"alphanum_fraction": 0.5296989679336548,
"avg_line_length": 14.172839164733887,
"blob_id": "e9f1437aec208c3192b8a766cd71cd7ec1e575b1",
"content_id": "59101944a88fd6d80486923525ded7bb8c203b17",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Markdown",
"length_bytes": 1229,
"license_type": "no_license",
"max_line_length": 94,
"num_lines": 81,
"path": "/doxygen/doxybook_output_vi/Files/vi__edition__keyboard_8s.md",
"repo_name": "orix-software/orix-software.github.io",
"src_encoding": "UTF-8",
"text": "---\ntitle: /Routines/vi_edition_keyboard.s\n\n---\n\n# /Routines/vi_edition_keyboard.s\n\n\n\n## Routine\n\n| | Name |\n| -------------- | -------------- |\n| | **[vi_edition_keyboard](Files/vi__edition__keyboard_8s.md#Routine-vi-edition-keyboard)** |\n\n\n## Routine documentation\n\n### Routine vi_edition_keyboard\n\n```ca65\nvi_edition_keyboard\n```\n\n\n\n\n## Source code\n\n```ca65\n.proc vi_edition_keyboard\n jsr vi_clear_command_line\n jsr vi_displays_info\n\n\n@loop:\n cgetc ; read keyboard\n\n cmp #'i'\n beq switch_to_edition_mode\n cmp #':'\n bne @loop\n\n jsr vi_command_edition\n cmp #$00 ; A is equal to 0 ? Yes restart keyboard\n beq @loop\n jsr _clrscr_vi\n lda #$01 ; Exit vi\n rts\n\nswitch_to_edition_mode:\n\n jsr vi_clear_command_line\n ldx #$00\n @loop:\n lda msg_insert,x\n beq @out\n sta VI_COMMANDLINE_VIDEO_ADRESS,x\n inx\n .IFPC02\n .pc02\n bra @loop\n .p02\n .else\n jmp @loop\n .endif\n@out:\n txa\n tay\n sty vi_pos_debug\n jsr displays_debug\n\n jsr vi_switch_to_edition_mode\n rts\n.endproc\n```\n\n\n-------------------------------\n\nUpdated on 2022-12-15 at 14:20:17 +0100\n"
},
{
"alpha_fraction": 0.739130437374115,
"alphanum_fraction": 0.739130437374115,
"avg_line_length": 12.142857551574707,
"blob_id": "0b48c745a4940513958b01f6047a6b11e81b44c4",
"content_id": "3907f07638e231260bdba5a37f759acab370bb93",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Markdown",
"length_bytes": 276,
"license_type": "no_license",
"max_line_length": 78,
"num_lines": 21,
"path": "/docs/commands/lsmem.md",
"repo_name": "orix-software/orix-software.github.io",
"src_encoding": "UTF-8",
"text": "# lsmem\n\n## Introduction\n\n Displays malloc table\n\n## SYNOPSYS\n\n+ #lsmem\n\n## DESCRIPTION\n\nDisplays malloc table. Free chunks and busy chuncks are displayed with ranges.\n\n## EXAMPLES\n\n+ lsmem\n\n## SOURCE\n\nhttps://github.com/orix-software/shell/blob/master/src/commands/lsmem.asm\n"
},
{
"alpha_fraction": 0.5472972989082336,
"alphanum_fraction": 0.7094594836235046,
"avg_line_length": 36,
"blob_id": "e1b2dc50b988d465fa7f27c3358ef8a09a3e92e3",
"content_id": "888c4389f638b5d11faa7640eadcd058005557df",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "JavaScript",
"length_bytes": 148,
"license_type": "no_license",
"max_line_length": 126,
"num_lines": 4,
"path": "/doxygen/doc/html/search/all_7.js",
"repo_name": "orix-software/orix-software.github.io",
"src_encoding": "UTF-8",
"text": "var searchData=\n[\n ['xpos_5fscreen_85',['xpos_screen',['../structvi__struct__data.html#ac708be4e5e24c529de773f45569bc98d',1,'vi_struct_data']]]\n];\n"
},
{
"alpha_fraction": 0.6872541308403015,
"alphanum_fraction": 0.7053501009941101,
"avg_line_length": 62.54999923706055,
"blob_id": "6cede55300e906820a5eb052d8f5cead9cc9c3ee",
"content_id": "35765cd275f97d28611f3e8e76b5d4683e9e11e8",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Markdown",
"length_bytes": 2542,
"license_type": "no_license",
"max_line_length": 221,
"num_lines": 40,
"path": "/doxygen/doxybook_output/Files/dir_2288eccfea1af74b995388678c757cc0.md",
"repo_name": "orix-software/orix-software.github.io",
"src_encoding": "UTF-8",
"text": "---\ntitle: /mnt/c/Users/plifp/OneDrive/oric/projets/orix-software/vi/src/functions\n\n---\n\n# /mnt/c/Users/plifp/OneDrive/oric/projets/orix-software/vi/src/functions\n\n\n\n## Directories\n\n| Name |\n| -------------- |\n| **[/mnt/c/Users/plifp/OneDrive/oric/projets/orix-software/vi/src/functions/subfunc](Files/dir_8a0a2fbb0e248d2b08adec17bb698d4e.md#dir-/mnt/c/users/plifp/onedrive/oric/projets/orix-software/vi/src/functions/subfunc)** |\n\n## Files\n\n| Name |\n| -------------- |\n| **[/mnt/c/Users/plifp/OneDrive/oric/projets/orix-software/vi/src/functions/_clrscr_vi.s](Files/__clrscr__vi_8s.md#file--clrscr-vi.s)** |\n| **[/mnt/c/Users/plifp/OneDrive/oric/projets/orix-software/vi/src/functions/tables.s](Files/tables_8s.md#file-tables.s)** |\n| **[/mnt/c/Users/plifp/OneDrive/oric/projets/orix-software/vi/src/functions/vi_command_edition.s](Files/vi__command__edition_8s.md#file-vi-command-edition.s)** |\n| **[/mnt/c/Users/plifp/OneDrive/oric/projets/orix-software/vi/src/functions/vi_displays_info.s](Files/vi__displays__info_8s.md#file-vi-displays-info.s)** |\n| **[/mnt/c/Users/plifp/OneDrive/oric/projets/orix-software/vi/src/functions/vi_edition_keyboard.s](Files/vi__edition__keyboard_8s.md#file-vi-edition-keyboard.s)** |\n| **[/mnt/c/Users/plifp/OneDrive/oric/projets/orix-software/vi/src/functions/vi_editor_switch_off_cursor.s](Files/vi__editor__switch__off__cursor_8s.md#file-vi-editor-switch-off-cursor.s)** |\n| **[/mnt/c/Users/plifp/OneDrive/oric/projets/orix-software/vi/src/functions/vi_editor_switch_on_cursor.s](Files/vi__editor__switch__on__cursor_8s.md#file-vi-editor-switch-on-cursor.s)** |\n| **[/mnt/c/Users/plifp/OneDrive/oric/projets/orix-software/vi/src/functions/vi_fill_screen_with_empty_line.s](Files/vi__fill__screen__with__empty__line_8s.md#file-vi-fill-screen-with-empty-line.s)** |\n| **[/mnt/c/Users/plifp/OneDrive/oric/projets/orix-software/vi/src/functions/vi_put_char.s](Files/vi__put__char_8s.md#file-vi-put-char.s)** |\n| **[/mnt/c/Users/plifp/OneDrive/oric/projets/orix-software/vi/src/functions/vi_set_length_file.s](Files/vi__set__length__file_8s.md#file-vi-set-length-file.s)** |\n| **[/mnt/c/Users/plifp/OneDrive/oric/projets/orix-software/vi/src/functions/vi_struct.s](Files/vi__struct_8s.md#file-vi-struct.s)** |\n| **[/mnt/c/Users/plifp/OneDrive/oric/projets/orix-software/vi/src/functions/vi_switch_to_edition_mode.s](Files/vi__switch__to__edition__mode_8s.md#file-vi-switch-to-edition-mode.s)** |\n\n\n\n\n\n\n-------------------------------\n\nUpdated on 2022-12-15 at 11:48:27 +0100\n"
},
{
"alpha_fraction": 0.6520912647247314,
"alphanum_fraction": 0.6596958041191101,
"avg_line_length": 19.19230842590332,
"blob_id": "4dff564b70e98282ef3b9f2f2e326c268f09d30b",
"content_id": "2129cbfeb73de782a29f1b82236af02056e27674",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Markdown",
"length_bytes": 526,
"license_type": "no_license",
"max_line_length": 62,
"num_lines": 26,
"path": "/docs/commands/list.md",
"repo_name": "orix-software/orix-software.github.io",
"src_encoding": "UTF-8",
"text": "# Command: list\n\n### LIST utility\n\n## SYNOPSYS\n+ list [-c] [+offset] file\n\n## EXAMPLES\n+ list /home/basic11/prog.bas\n+ list +19 scuba.tap\n\n## DESCRIPTION\nSimilar to LIST instruction of the BASIC ROM but for files.\nWith +nnn you can go past the tape header of a tape file.\nYou can use [SPACE] to pause the display ou [CTRL]+C to abort.\n\n## OPTIONS\n* -h\n show help message and exit\n* -c\n Color\n* +nnn\n\t\tskip nnn bytes before starting the listing\n\n## SOURCE\nhttps://github.com/orix-software/list\n\n"
},
{
"alpha_fraction": 0.6514768004417419,
"alphanum_fraction": 0.6658228039741516,
"avg_line_length": 23.673913955688477,
"blob_id": "e68c4d9b90ddf7563ba2e0b24b4697d78a4a4bad",
"content_id": "83600870b51663a238e7f66666ab8811f256bfbe",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Markdown",
"length_bytes": 1185,
"license_type": "no_license",
"max_line_length": 148,
"num_lines": 46,
"path": "/kernel/docs/primitives/xgetargv.md",
"repo_name": "orix-software/orix-software.github.io",
"src_encoding": "UTF-8",
"text": "# XGETARGV\r\n\r\n## Description\r\n\r\nGet argv. X register contains the number of the arg search\r\n\r\nKernel handle a struct with XMAINARGS. This struct is handled by Kernel, and no action are required in external code, but here is how struct works :\r\n\r\n``` ca65\r\n.struct XMAINARGS_STRUCT\r\nargv_ptr .res KERNEL_MAX_ARGS_COMMAND_LINE\r\nargv_value_ptr .res KERNEL_LENGTH_MAX_CMDLINE+KERNEL_MAX_ARGS_COMMAND_LINE ; add 0 to string\r\n.endstruct\r\n```\r\nargv_ptr contains an offset of each param. It means that we can't have a length of args greater than 256.\r\n\r\nXMAINARGS and XGETARGV does not handle \"\" yet and \"\\ \"\r\n\r\n\r\n## Input\r\n\r\nA & Y the ptr of struct from XMAINARGS\r\n\r\nX the number of arg to get, first param is 0 not 1 !\r\n\r\n## Output\r\n\r\nA & Y contains argv ptr from xmainargs struct. It returns a copy of the command line with args parsed\r\n\r\n## Example\r\n\r\n``` ca65\r\n XMAINARGS = $2C\r\n XGETARGV = $2E\r\n BRK_KERNEL XMAINARGS\r\n sta debug_mainargs_ptr\r\n sty debug_mainargs_ptr+1\r\n\r\n ldx #$02 ; get arg 2 ; Get the third param\r\n lda debug_mainargs_ptr\r\n ldy debug_mainargs_ptr+1\r\n\r\n BRK_KERNEL XGETARGV\r\n\r\n ; A & Y contains ptr\r\n```\r\n\r\n\r\n"
},
{
"alpha_fraction": 0.6495176553726196,
"alphanum_fraction": 0.6495176553726196,
"avg_line_length": 16.176469802856445,
"blob_id": "36f8bf12c74c3e92a748a78bb3e3a56219210571",
"content_id": "c239db84e92c1e6fd69463058c55d4457ab982c8",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Markdown",
"length_bytes": 311,
"license_type": "no_license",
"max_line_length": 63,
"num_lines": 17,
"path": "/kernel/docs/primitives/index.md",
"repo_name": "orix-software/orix-software.github.io",
"src_encoding": "UTF-8",
"text": "# Summary\r\n\r\n## Args from commandline\r\n\r\n* [XMAINARGS](xmainargs) : get command line and build argv/argc\r\n* [XGETARGV](xgetargv) : get an argv from xmainargs struct\r\n\r\n## Numbers\r\n* [XBINDX](xbindx) : convert a number to decimal\r\n\r\n## Memory\r\n* XMALLOC\r\n* XFREE\r\n\r\n## Files\r\n\r\n* [XOPEN](xopen) : Open a file\r\n\r\n"
},
{
"alpha_fraction": 0.631205677986145,
"alphanum_fraction": 0.6702127456665039,
"avg_line_length": 13.666666984558105,
"blob_id": "12b0e98c095cccd9be33b18253d44aad51e1af5e",
"content_id": "da8dca96eb8503ed1c4796cbf6b1ac698247a8b3",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Markdown",
"length_bytes": 283,
"license_type": "no_license",
"max_line_length": 36,
"num_lines": 18,
"path": "/kernel/docs/dynamiclink/dynlibformat.md",
"repo_name": "orix-software/orix-software.github.io",
"src_encoding": "UTF-8",
"text": "# Dynamic lib format\r\n\r\nRelocatable format.\r\n``` ca65\r\n name_lib\r\n.res 14\r\nversion_lib ; ascii\r\n.res 6\r\nversion_num ;\r\n.res 2\r\nnumber_of_function\r\n.res 1\r\n; définition des fonctions de la lib\r\nentry_1\r\n.res 4 ; offset dans le fichier\r\nentry_2\r\n.res 4 ; offset dans le fichier\r\n```\r\n"
},
{
"alpha_fraction": 0.5489690899848938,
"alphanum_fraction": 0.5695876479148865,
"avg_line_length": 12.370369911193848,
"blob_id": "3baac5c29b29dc50110dcd0c1b79c564ec756103",
"content_id": "7a918148cb36cd026964e085f28969773239cc8c",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Markdown",
"length_bytes": 388,
"license_type": "no_license",
"max_line_length": 99,
"num_lines": 27,
"path": "/docs/kernel/primitives/xfree.md",
"repo_name": "orix-software/orix-software.github.io",
"src_encoding": "UTF-8",
"text": "# XFREE\r\n\r\n## Description\r\n\r\nFree memory\r\n\r\n## Input\r\n\r\n* A&Y : ptr\r\n\r\n## Output\r\n\r\n\r\n## Example\r\n\r\n```ca65\r\n .include \"telestrat.inc\"\r\n\r\n lda ptr\r\n ldy ptr+1\r\n BRK_TELEMON XFREE\r\n rts\r\n```\r\n\r\n!!! tip \"See [mfree](../../../developer_manual/orixsdk_macros/mfree) macro from orix-sdk to use it\"\r\n\r\n!!! fail \"XFREE still have bugs for versions before kernel v2022.2\"\r\n"
},
{
"alpha_fraction": 0.5340501666069031,
"alphanum_fraction": 0.718638002872467,
"avg_line_length": 78.71428680419922,
"blob_id": "06452b1f07659101f4e9427db046b50fa8c58281",
"content_id": "e50118d0fce41e6e22d6a4599e37631e9f529ccc",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "JavaScript",
"length_bytes": 558,
"license_type": "no_license",
"max_line_length": 142,
"num_lines": 7,
"path": "/doxygen/doc/html/search/variables_1.js",
"repo_name": "orix-software/orix-software.github.io",
"src_encoding": "UTF-8",
"text": "var searchData=\n[\n ['pos_5ffile_152',['pos_file',['../structvi__struct__data.html#aec4b493d747fc7f92ed3b621c3152547',1,'vi_struct_data']]],\n ['pos_5ffile_5faddr_153',['pos_file_addr',['../structvi__struct__data.html#a94dd20e3a32701eb3c4d3ba7d62c28d5',1,'vi_struct_data']]],\n ['posx_5fcommand_5fline_154',['posx_command_line',['../structvi__struct__data.html#af8fc3cc83cc863889636768107d3f2a0',1,'vi_struct_data']]],\n ['ptr_5ffile_5fbegin_155',['ptr_file_begin',['../structvi__struct__data.html#ad5847413cc24f34cca7ddd24eb59fd83',1,'vi_struct_data']]]\n];\n"
},
{
"alpha_fraction": 0.5840708017349243,
"alphanum_fraction": 0.5899705290794373,
"avg_line_length": 13.318181991577148,
"blob_id": "51974423619e0a524f4e3044218115b61b299991",
"content_id": "3dc7444e2091f2157bf6bbfe3df5c24baee3e0f6",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Markdown",
"length_bytes": 339,
"license_type": "no_license",
"max_line_length": 66,
"num_lines": 22,
"path": "/docs/developer_manual/orixsdk_macros/fclose.md",
"repo_name": "orix-software/orix-software.github.io",
"src_encoding": "UTF-8",
"text": "\r\n# FCLOSE macro\r\n\r\n## Description\r\n\r\nClose an opened file\r\n\r\n## usage\r\n\r\n fclose (fp)\r\n\r\n## Example\r\n\r\n```ca65\r\n .include \"telestrat.inc\"\r\n .include \"../orix-sdk/macros/SDK_file.mac\"\r\n\r\n\r\n fclose (ptr) ; Ptr contains the FD (from fopen macro)\r\n rts\r\n```\r\n\r\nCall [XCLOSE](../../../kernel/primitives/XCLOSE/) kernel function.\r\n"
},
{
"alpha_fraction": 0.4459974467754364,
"alphanum_fraction": 0.46531131863594055,
"avg_line_length": 16.886363983154297,
"blob_id": "61cc7f94336362018f30064412d9276bb9008e26",
"content_id": "f56efa23b11ae38fdd143ee728ea8b19f365ca8c",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Markdown",
"length_bytes": 3935,
"license_type": "no_license",
"max_line_length": 103,
"num_lines": 220,
"path": "/doxygen/doxybook_output/Files/rom_8s.md",
"repo_name": "orix-software/orix-software.github.io",
"src_encoding": "UTF-8",
"text": "---\ntitle: /mnt/c/Users/plifp/OneDrive/oric/projets/orix-software/vi/src/rom.s\n\n---\n\n# /mnt/c/Users/plifp/OneDrive/oric/projets/orix-software/vi/src/rom.s\n\n\n\n## Attributes\n\n| | Name |\n| -------------- | -------------- |\n| int | **[addr_commands](Files/rom_8s.md#variable-addr-commands)** |\n| char[$FFF0- *] | **[end_rom](Files/rom_8s.md#variable-end-rom)** |\n| char[2] | **[parse_vector](Files/rom_8s.md#variable-parse-vector)** |\n| unsigned int | **[signature_adress_commands](Files/rom_8s.md#variable-signature-adress-commands)** |\n| unsigned int | **[list_commands](Files/rom_8s.md#variable-list-commands)** |\n| char | **[number_of_commands](Files/rom_8s.md#variable-number-of-commands)** |\n| int | **[copyright](Files/rom_8s.md#variable-copyright)** |\n| int | **[NMI](Files/rom_8s.md#variable-nmi)** |\n| int | **[RESET](Files/rom_8s.md#variable-reset)** |\n| int | **[BRK_IRQ](Files/rom_8s.md#variable-brk-irq)** |\n\n## Defines\n\n| | Name |\n| -------------- | -------------- |\n| | **[userzp](Files/rom_8s.md#define-userzp)** |\n| | **[NULL](Files/rom_8s.md#define-null)** |\n\n\n\n## Attributes Documentation\n\n### variable addr_commands\n\n```cpp\nint addr_commands;\n```\n\n\n### variable end_rom\n\n```cpp\nchar[$FFF0- *] end_rom;\n```\n\n\n### variable parse_vector\n\n```cpp\nchar[2] parse_vector;\n```\n\n\n### variable signature_adress_commands\n\n```cpp\nunsigned int signature_adress_commands;\n```\n\n\n### variable list_commands\n\n```cpp\nunsigned int list_commands;\n```\n\n\n### variable number_of_commands\n\n```cpp\nchar number_of_commands;\n```\n\n\n### variable copyright\n\n```cpp\nint copyright;\n```\n\n\n### variable NMI\n\n```cpp\nint NMI;\n```\n\n\n### variable RESET\n\n```cpp\nint RESET;\n```\n\n\n### variable BRK_IRQ\n\n```cpp\nint BRK_IRQ;\n```\n\n\n\n## Macros Documentation\n\n### define userzp\n\n```cpp\n#define userzp $80\n```\n\n\n### define NULL\n\n```cpp\n#define NULL 0\n```\n\n\n## Source code\n\n```cpp\n;----------------------------------------------------------------------\n; cc65 includes\n;----------------------------------------------------------------------\n.include \"telestrat.inc\" ; from cc65\n.include \"fcntl.inc\" ; from cc65\n.include \"errno.inc\" ; from cc65\n.include \"cpu.mac\" ; from cc65\n\n\n;----------------------------------------------------------------------\n; Orix SDK includes\n;----------------------------------------------------------------------\n.include \"../dependencies/orix-sdk/macros/SDK.mac\"\n.include \"../dependencies/orix-sdk/include/SDK.inc\"\n.include \"../dependencies/orix-sdk/include/keyboard.inc\"\n\n\n\n\n;----------------------------------------------------------------------\n; Zero Page\n;----------------------------------------------------------------------\nuserzp := $80\nNULL = 0\n\n;----------------------------------------------------------------------\n; Shell\n;----------------------------------------------------------------------\n.org $C000\n\n.code\nstart:\nrts\n\n\n\n\n.include \"commands/vi.s\"\n\n\n\nlist_of_commands_bank:\n .asciiz \"vi\"\n\naddr_commands:\n .word _vi\n\nsignature:\n .asciiz \"vi 2022.4\"\n\n\nend_rom:\n\n\n;----------------------------------------------------------------------\n;\n;----------------------------------------------------------------------\n.out .sprintf(\"Size of ROM : %d bytes\", end_rom-$c000)\n\n\n .res $FFF0-*\n .org $FFF0\n.byt 1 ; Command ROM\n; $fff1\nparse_vector:\n .byt $00,$00\n; fff3\nsignature_adress_commands:\n .addr addr_commands\n; fff5-fff6\nlist_commands:\n .addr list_of_commands_bank\n; fff7\nnumber_of_commands:\n .byt 1\n\n; fff8-fff9\ncopyright:\n .word signature\n; fffa-fffb\nNMI:\n .word start\n\n; fffc-fffd\nRESET:\n .word start\n; fffe-ffff\nBRK_IRQ:\n .word IRQVECTOR\n```\n\n\n-------------------------------\n\nUpdated on 2022-12-15 at 11:48:27 +0100\n"
},
{
"alpha_fraction": 0.7272727489471436,
"alphanum_fraction": 0.7272727489471436,
"avg_line_length": 21,
"blob_id": "1110ed762cc80328627661e5330f97a719e2925d",
"content_id": "6b00dd777f6d60845421320f31548795d8d9fa11",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Markdown",
"length_bytes": 396,
"license_type": "no_license",
"max_line_length": 72,
"num_lines": 18,
"path": "/docs/commands/twil.md",
"repo_name": "orix-software/orix-software.github.io",
"src_encoding": "UTF-8",
"text": "# twil\n\n## Introduction\n\nTwil command can displays the current firmware of twilighte card, and\ncan swap root folder to usbkey or sdcard.\n\n## SYNOPSYS\n\n+ /#twil -f : displays Twilighte board firmware\n+ /#twil -u : switch default device : usbdrive\n+ /#twil -s : swap default device to : sdcard\n\n## DESCRIPTION\n\n## SOURCE\n\nhttps://github.com/orix-software/shell/blob/master/src/commands/twil.asm\n"
},
{
"alpha_fraction": 0.4278768301010132,
"alphanum_fraction": 0.48622366786003113,
"avg_line_length": 11.854166984558105,
"blob_id": "ce79f0c17f5c7d6fa99b36b49613e270b40ffd01",
"content_id": "5fdc61852550b9b4527ae2fecbbcc0c904be6852",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Markdown",
"length_bytes": 617,
"license_type": "no_license",
"max_line_length": 70,
"num_lines": 48,
"path": "/doxygen/doxybook_output_vi/Files/vi__check__0A_8s.md",
"repo_name": "orix-software/orix-software.github.io",
"src_encoding": "UTF-8",
"text": "---\ntitle: vi_check_0A.s\n\n---\n\n# vi_check_0A.s\n\n\n\n## Routine\n\n| | Name |\n| -------------- | -------------- |\n| | **[vi_check_0A](Files/vi__check__0A_8s.md#Routine-vi-check-0a)** |\n\n\n## Routine documentation\n\n### Routine vi_check_0A\n\n```ca65\nvi_check_0A\n```\n\n\n\n\n## Source code\n\n```ca65\n\n.proc vi_check_0A\n ; jmp vi_check_0A\n ; $c398\n lda (vi_ptr_file_used),y\n cmp #$0A\n beq @exit_advance_after_LF\n rts\n@exit_advance_after_LF:\n jsr vi_vi_ptr_file_used_plus_plus\n rts\n.endproc\n```\n\n\n-------------------------------\n\nUpdated on 2022-12-15 at 14:20:17 +0100\n"
},
{
"alpha_fraction": 0.7201257944107056,
"alphanum_fraction": 0.7295597195625305,
"avg_line_length": 27.909090042114258,
"blob_id": "c637de713f8d08a1d29b3a7f068048276c06950b",
"content_id": "b7282df2de65337d23076d5e34ece16f90034084",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Markdown",
"length_bytes": 318,
"license_type": "no_license",
"max_line_length": 173,
"num_lines": 11,
"path": "/docs/user_manual/loader.md",
"repo_name": "orix-software/orix-software.github.io",
"src_encoding": "UTF-8",
"text": "# Loader\n\n{ align=left }\n\nLoader can start atmos or oric-1 tape files.\n\nIf the software is available for Atmos, the tape file will be launched with atmos rom. But if the tape file is Oric-1 only, the software will be launched only in Oric-1 mode\n\n## Usage\n\nPress ++funct+L++ to start loader\n"
},
{
"alpha_fraction": 0.2522522509098053,
"alphanum_fraction": 0.4144144058227539,
"avg_line_length": 6.9285712242126465,
"blob_id": "0557579de88ed8be4c14e3a787923756241f53b9",
"content_id": "41c6f7a90576019aa7eab7c40984bb845c85cbea",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Markdown",
"length_bytes": 111,
"license_type": "no_license",
"max_line_length": 39,
"num_lines": 14,
"path": "/doxygen/doxybook_output_vi/Pages/strlen.md",
"repo_name": "orix-software/orix-software.github.io",
"src_encoding": "UTF-8",
"text": "---\ntitle: strlen\n\n---\n\n# strlen\n\n\n\n\n\n-------------------------------\n\nUpdated on 2022-12-15 at 14:20:17 +0100\n"
},
{
"alpha_fraction": 0.5643879175186157,
"alphanum_fraction": 0.5906200408935547,
"avg_line_length": 19.29032325744629,
"blob_id": "a30de8d1d9fc21e8ccecd13df7686010cddb58d5",
"content_id": "1916827b3ffbfb3d09b0c643d965f37b3bf2fb0f",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Markdown",
"length_bytes": 1258,
"license_type": "no_license",
"max_line_length": 171,
"num_lines": 62,
"path": "/doxygen/doxybook_output_vi/Files/vi__ypos__screen__plus__plus_8s.md",
"repo_name": "orix-software/orix-software.github.io",
"src_encoding": "UTF-8",
"text": "---\ntitle: vi_ypos_screen_plus_plus.s\n\n---\n\n# vi_ypos_screen_plus_plus.s\n\n\n\n## Routine\n\n| | Name |\n| -------------- | -------------- |\n| | **[vi_ypos_screen_plus_plus](Files/vi__ypos__screen__plus__plus_8s.md#Routine-vi-ypos-screen-plus-plus)**<br>Increment ypos and returns IS_LAST_LINE_OF_SCREEN_TEXT. |\n\n\n## Routine documentation\n\n### Routine vi_ypos_screen_plus_plus\n\n```ca65\nvi_ypos_screen_plus_plus\n```\n\nIncrement ypos and returns IS_LAST_LINE_OF_SCREEN_TEXT. \n\n**See**: [vi_struct](Files/vi_8s.md#variable-vi-struct)\n\n**Return**: A : IS_LAST_LINE_OF_SCREEN_TEXT if we are on the last line of the screen else $00 \n\n\n\n## Source code\n\n```ca65\n;; Increment ypos and returns IS_LAST_LINE_OF_SCREEN_TEXT\n;\n;@return A : IS_LAST_LINE_OF_SCREEN_TEXT if we are on the last line of the screen else $00\n;@see vi_struct\n;;\n.proc vi_ypos_screen_plus_plus\n ; ypos_screen=ypos_screen+1\n ldy #vi_struct_data::ypos_screen\n lda (vi_struct),y\n cmp #VI_LAST_LINE_EDITOR\n beq @no_add\n clc\n adc #$01\n sta (vi_struct),y\n lda #$00\n rts\n@no_add:\n ;\n lda #IS_LAST_LINE_OF_SCREEN_TEXT\n rts\n.endproc\n```\n\n\n-------------------------------\n\nUpdated on 2022-12-15 at 14:20:17 +0100\n"
},
{
"alpha_fraction": 0.5098280310630798,
"alphanum_fraction": 0.5405405163764954,
"avg_line_length": 15.958333015441895,
"blob_id": "e025285de7af5918db5dbbcf8b47444eb7c22455",
"content_id": "9fa80d27f94c19944f3b02045ab5ab27c456b1ab",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Markdown",
"length_bytes": 815,
"license_type": "no_license",
"max_line_length": 96,
"num_lines": 48,
"path": "/doxygen/doxybook_output_vi/Files/vi__add__char__to__text_8s.md",
"repo_name": "orix-software/orix-software.github.io",
"src_encoding": "UTF-8",
"text": "---\ntitle: vi_add_char_to_text.s\n\n---\n\n# vi_add_char_to_text.s\n\n\n\n## Routine\n\n| | Name |\n| -------------- | -------------- |\n| | **[vi_add_char_to_text](Files/vi__add__char__to__text_8s.md#Routine-vi-add-char-to-text)** |\n\n\n## Routine documentation\n\n### Routine vi_add_char_to_text\n\n```ca65\nvi_add_char_to_text\n```\n\n\n\n\n## Source code\n\n```ca65\n.proc vi_add_char_to_text\n ; Ajoute un caractère dans le texte\n pha\n jsr vi_ptr_last_char_plus_plus\n jsr vi_shift_file_from_memory_one_char ; shift one char the text file in the memory\n ldy #$00\n pla\n sta (vi_ptr_file_used),y ; store \\n\n jsr vi_ptr_file_used_plus_plus\n jsr vi_length_file_plus_plus\n rts\n.endproc\n```\n\n\n-------------------------------\n\nUpdated on 2022-12-15 at 14:20:17 +0100\n"
},
{
"alpha_fraction": 0.515802800655365,
"alphanum_fraction": 0.5562579035758972,
"avg_line_length": 15.479166984558105,
"blob_id": "c43741432c12a033180ebb0f5ae86ca1d7e51bbd",
"content_id": "a9841ff10d6b8aa1a7b8d0ce1acc0052847519d6",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Markdown",
"length_bytes": 791,
"license_type": "no_license",
"max_line_length": 103,
"num_lines": 48,
"path": "/doxygen/doxybook_output/Files/vi__check__0A_8s.md",
"repo_name": "orix-software/orix-software.github.io",
"src_encoding": "UTF-8",
"text": "---\ntitle: /mnt/c/Users/plifp/OneDrive/oric/projets/orix-software/vi/src/functions/subfunc/vi/vi_check_0A.s\n\n---\n\n# /mnt/c/Users/plifp/OneDrive/oric/projets/orix-software/vi/src/functions/subfunc/vi/vi_check_0A.s\n\n\n\n## Functions\n\n| | Name |\n| -------------- | -------------- |\n| | **[vi_check_0A](Files/vi__check__0A_8s.md#function-vi-check-0a)**() |\n\n\n## Functions Documentation\n\n### function vi_check_0A\n\n```cpp\nvi_check_0A()\n```\n\n\n\n\n## Source code\n\n```cpp\n\n.proc vi_check_0A\n ; jmp vi_check_0A\n ; $c398\n lda (vi_ptr_file_used),y\n cmp #$0A\n beq @exit_advance_after_LF\n rts\n@exit_advance_after_LF:\n jsr vi_vi_ptr_file_used_plus_plus\n rts\n.endproc\n```\n\n\n-------------------------------\n\nUpdated on 2022-12-15 at 11:48:27 +0100\n"
},
{
"alpha_fraction": 0.6800109148025513,
"alphanum_fraction": 0.7143245935440063,
"avg_line_length": 33.31775665283203,
"blob_id": "f85c70184fbc6d811d8dee0c5ce2a5a2bebef6be",
"content_id": "5e714ce2926cb12d4831f1e5ed60aacc3fdd8e04",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Markdown",
"length_bytes": 3691,
"license_type": "no_license",
"max_line_length": 75,
"num_lines": 107,
"path": "/docs/commands/orixcfg.md",
"repo_name": "orix-software/orix-software.github.io",
"src_encoding": "UTF-8",
"text": "# orixcfg\n\n## Introduction\n\nUpdate kernel, shell : orixcfg\n\nWhen a new released is done, you can update the eeprom with the new\nkernel and new roms.\n\nIf you want to know if you need to update the kernel, you can compare\nyour current version, and the last release version. You can go to\n[http://orix.oric.org](http://orix.oric.org/)\n\nThe sequence of the Orix release is Year.X. There is 4 releases a year, and\neach one must be done until you reach the final one, in order to avoid\nsome case. If your version is v2020.3 and the last available version is\nv2021.4. You need to update to v2020.4, then v2021.1, v2021.2, v2021.3,\nv2021.4, v2022.1, v2022.4\n\nIt's maybe possible to jump to version v2022.3, but it's at your own\nrisk because there is a « chance » that some kernel calls changed, and\norixcfg could do wrong step.\n\n\nWhen you need to update kernel, you can update it with orixcfg. You just\nneed to define set 4 on the command line. This step is very\n**dangerous** if you don't load the right file. There is no verification\nand any file on the command line will be load in the kernel set.\nUsually, kernel set file is named kernxxxx.r64.\n\nIf you did something wrong on this step, you won't be able to start orix\nagain. It means that you will need to remove eeprom from the card and\nprogram it with a eeprom programmer\n\nThis next command will load kernel.r64 to kernel set. Please wait until\nOrix reboots. If you have a kernel 2020.3 and you need to load a kernel\n2021,1, you will need to load previous kernel set before the update of\n2021.1.\n\n.r64 extension means that it's a 64KB set. It's usually used to define\nthat the file contains 4 roms of 16KB.\n\nPlease note that we provide 2 kernels version. One named\n« kernelsd.r64 » which means that the default device will be sdcard, and\nthe other one « kernelus.r64 » which means that default device will be\n« usb » (usbkey). If you load the wrong kernel at this step, you can use\ntwil command to switch to the right device, and you can start again\nkernel update with the right file (kernelsd.r64 or kernelus.r64\ndepending of your configuration).\n\norixcfg -r -s 4 kernelsd.r64\n\nLoad a ROM into a ram slot\n----------------------------------------\n\nSpace between values and switches are not optionnal, orixcfg needs\ntheses spaces\n\n/\\#orixcfg -b XX -l myrom.rom\n\nThis command will load myrom.rom (in the current path), in RAM bank XX\n\nOlder usage as : orixcfg -r -s X -b Y myrom.rom is no longer included in\norixcfg since orixcfg v2021.3\n\nLoad a set of ROM into ROM slot\n---------------------------------------------\n\norixcfg -r -s 0 myrom.r64\n\n[]{#anchor-72}This command will load myrom.r64 (in the current path), in\nset 0. For instance, you can not load one bank, you need to load 64KB\nset.\n\nClear bank ram or initialize it\n---------------------------------------------\n\nRam bank are not initialized when the board is tested. If you have\ngarbage on screen when you uses bank (after you used twil -w). You have\nto clear all ram bank (ram bank are battery saved).\n\nIf you want to clear bank 4 of the set 0, you can do this command. You\nneed to do this command for each bank of each set. For instance, there\nis no switch to clear all the ram with one command.\n\n/\\#orixcfg -w -s 0 -b 4 -c\n\nFlush all ram bank\n--------------------------------\n\norixcfg -w -f\n\n## SYNOPSYS\n\n+ orixcfg\nLoad a .rom into RAM (in the example, set 0, bank 4)\n+ orixcfg -w -s 0 -b 4 myrom.rom\nClear a ram bank (in the example, set 0, bank 4)\n+ orixcfg -w -s 0 -b 4 -c\nLoad an eeprom set (in the example, set 0), only 64KB set\n+ orixcfg -r -s 0 myset.r64\nUpdate kernel\n+ orixcfg -r -s 4 kernelus.r64\n\n## DESCRIPTION\n\nManage twilighte board configuration\n"
},
{
"alpha_fraction": 0.48148149251937866,
"alphanum_fraction": 0.5432098507881165,
"avg_line_length": 19.25,
"blob_id": "9ef0815b0a81e07e142209b44a77b1887a070a06",
"content_id": "cf15d70ad583b65c31eba804ec6e38d76a618a20",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "JavaScript",
"length_bytes": 81,
"license_type": "no_license",
"max_line_length": 59,
"num_lines": 4,
"path": "/doxygen/doc/html/search/files_3.js",
"repo_name": "orix-software/orix-software.github.io",
"src_encoding": "UTF-8",
"text": "var searchData=\n[\n ['tables_2es_91',['tables.s',['../tables_8s.html',1,'']]]\n];\n"
},
{
"alpha_fraction": 0.3961038887500763,
"alphanum_fraction": 0.5324675440788269,
"avg_line_length": 12.391304016113281,
"blob_id": "882d946804311f83e94d7b6e69cd180cd5285ab0",
"content_id": "75ae2bfa416c88b54ce409fd390828a72411d82a",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Markdown",
"length_bytes": 308,
"license_type": "no_license",
"max_line_length": 140,
"num_lines": 23,
"path": "/docs/tools_docs/vi/Files/dir_eb94e028ad508402029845f2921e79f7.md",
"repo_name": "orix-software/orix-software.github.io",
"src_encoding": "UTF-8",
"text": "---\ntitle: /data\n\n---\n\n# /data\n\n\n\n## Directories\n\n| Name |\n| -------------- |\n| **[/data/vi](Files/dir_834496eb029ed14441e8790c53896f5f.md#dir-/mnt/c/users/plifp/onedrive/oric/projets/orix-software/vi/src/data/vi)** |\n\n\n\n\n\n\n-------------------------------\n\nUpdated on 2022-12-15 at 14:20:17 +0100\n"
},
{
"alpha_fraction": 0.4921405017375946,
"alphanum_fraction": 0.5577333569526672,
"avg_line_length": 132.8441619873047,
"blob_id": "d449af71e91e866fe9e58aa8f9ca53a79a1cd62c",
"content_id": "6359a2bb888c8bd5f9969108dfdd80152008b4c3",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "HTML",
"length_bytes": 10318,
"license_type": "no_license",
"max_line_length": 7288,
"num_lines": 77,
"path": "/doxygen/kernel/xabox_8asm_source.html",
"repo_name": "orix-software/orix-software.github.io",
"src_encoding": "UTF-8",
"text": "<!DOCTYPE html PUBLIC \"-//W3C//DTD XHTML 1.0 Transitional//EN\" \"http://www.w3.org/TR/xhtml1/DTD/xhtml1-transitional.dtd\">\n<html xmlns=\"http://www.w3.org/1999/xhtml\">\n<head>\n<meta http-equiv=\"Content-Type\" content=\"text/xhtml;charset=UTF-8\"/>\n<meta http-equiv=\"X-UA-Compatible\" content=\"IE=9\"/>\n<meta name=\"generator\" content=\"Doxygen 1.8.13\"/>\n<meta name=\"viewport\" content=\"width=device-width, initial-scale=1\"/>\n<title>Orix : Kernel: /home/jede/oric/kernel/src/functions/graphics/xabox.asm Source File</title>\n<link href=\"tabs.css\" rel=\"stylesheet\" type=\"text/css\"/>\n<script type=\"text/javascript\" src=\"jquery.js\"></script>\n<script type=\"text/javascript\" src=\"dynsections.js\"></script>\n<link href=\"search/search.css\" rel=\"stylesheet\" type=\"text/css\"/>\n<script type=\"text/javascript\" src=\"search/searchdata.js\"></script>\n<script type=\"text/javascript\" src=\"search/search.js\"></script>\n<link href=\"doxygen.css\" rel=\"stylesheet\" type=\"text/css\" />\n</head>\n<body>\n<div id=\"top\"><!-- do not remove this div, it is closed by doxygen! -->\n<div id=\"titlearea\">\n<table cellspacing=\"0\" cellpadding=\"0\">\n <tbody>\n <tr style=\"height: 56px;\">\n <td id=\"projectalign\" style=\"padding-left: 0.5em;\">\n <div id=\"projectname\">Orix : Kernel\n </div>\n </td>\n </tr>\n </tbody>\n</table>\n</div>\n<!-- end header part -->\n<!-- Generated by Doxygen 1.8.13 -->\n<script type=\"text/javascript\">\nvar searchBox = new SearchBox(\"searchBox\", \"search\",false,'Search');\n</script>\n<script type=\"text/javascript\" src=\"menudata.js\"></script>\n<script type=\"text/javascript\" src=\"menu.js\"></script>\n<script type=\"text/javascript\">\n$(function() {\n initMenu('',true,false,'search.php','Search');\n $(document).ready(function() { init_search(); });\n});\n</script>\n<div id=\"main-nav\"></div>\n<!-- window showing the filter options -->\n<div id=\"MSearchSelectWindow\"\n onmouseover=\"return searchBox.OnSearchSelectShow()\"\n onmouseout=\"return searchBox.OnSearchSelectHide()\"\n onkeydown=\"return searchBox.OnSearchSelectKey(event)\">\n</div>\n\n<!-- iframe showing the search results (closed by default) -->\n<div id=\"MSearchResultsWindow\">\n<iframe src=\"javascript:void(0)\" frameborder=\"0\" \n name=\"MSearchResults\" id=\"MSearchResults\">\n</iframe>\n</div>\n\n<div id=\"nav-path\" class=\"navpath\">\n <ul>\n<li class=\"navelem\"><a class=\"el\" href=\"dir_68267d1309a1af8e8297ef4c3efbcdba.html\">src</a></li><li class=\"navelem\"><a class=\"el\" href=\"dir_e823141bb13a34caac0c96ccd0d33fcf.html\">functions</a></li><li class=\"navelem\"><a class=\"el\" href=\"dir_9d75159dca3ab02ab882d4cb5b0c550a.html\">graphics</a></li> </ul>\n</div>\n</div><!-- top -->\n<div class=\"header\">\n <div class=\"headertitle\">\n<div class=\"title\">xabox.asm</div> </div>\n</div><!--header-->\n<div class=\"contents\">\n<div class=\"fragment\"><div class=\"line\"><a name=\"l00001\"></a><span class=\"lineno\"> 1</span> </div><div class=\"line\"><a name=\"l00002\"></a><span class=\"lineno\"> 2</span> ; TRACE UN RECTANGLE ABSOLU </div><div class=\"line\"><a name=\"l00003\"></a><span class=\"lineno\"> 3</span>  </div><div class=\"line\"><a name=\"l00004\"></a><span class=\"lineno\"> 4</span> ;Principe:Par un procédé très astucieux, on va tracer les 4 traits (en absolu) </div><div class=\"line\"><a name=\"l00005\"></a><span class=\"lineno\"> 5</span> ; joignant les 4 points. Voila bien la seule astuce inutile ! Il aurait </div><div class=\"line\"><a name=\"l00006\"></a><span class=\"lineno\"> 6</span> ; été 100 (pourquoi pas 1000 !?) fois plus simple, puisque le rectangle </div><div class=\"line\"><a name=\"l00007\"></a><span class=\"lineno\"> 7</span> ; n<span class=\"stringliteral\">'est fait que de verticales et d'</span>horizontales, de tracer le rectangle </div><div class=\"line\"><a name=\"l00008\"></a><span class=\"lineno\"> 8</span> ; immédiatement en relatif plutot que de passer par des calculs de </div><div class=\"line\"><a name=\"l00009\"></a><span class=\"lineno\"> 9</span> ; tangentes lourds et donnant un résultat connu (0 et infini) !!! </div><div class=\"line\"><a name=\"l00010\"></a><span class=\"lineno\"> 10</span> ; Cette piètre routine nécessite les paramètres comme ABOX dans HRSx. </div><div class=\"line\"><a name=\"l00011\"></a><span class=\"lineno\"> 11</span> ; Notez également l<span class=\"stringliteral\">'utilisation de l'</span>absolu,X plutot que du page 0,X en </div><div class=\"line\"><a name=\"l00012\"></a><span class=\"lineno\"> 12</span> ; $E850... tss tss ! </div><div class=\"line\"><a name=\"l00013\"></a><span class=\"lineno\"> 13</span> </div><div class=\"line\"><a name=\"l00014\"></a><span class=\"lineno\"> 14</span> XABOX_ROUTINE:</div><div class=\"line\"><a name=\"l00015\"></a><span class=\"lineno\"> 15</span>  ldy #$06 ; on place les 4 paramètres (poids faible seulement)</div><div class=\"line\"><a name=\"l00016\"></a><span class=\"lineno\"> 16</span>  ldx #$03 </div><div class=\"line\"><a name=\"l00017\"></a><span class=\"lineno\"> 17</span> LE830</div><div class=\"line\"><a name=\"l00018\"></a><span class=\"lineno\"> 18</span>  lda HRS1,Y ; de HRSx </div><div class=\"line\"><a name=\"l00019\"></a><span class=\"lineno\"> 19</span>  sta DECFIN,X ; dans $06-7-8-9 </div><div class=\"line\"><a name=\"l00020\"></a><span class=\"lineno\"> 20</span>  DEY </div><div class=\"line\"><a name=\"l00021\"></a><span class=\"lineno\"> 21</span>  DEY </div><div class=\"line\"><a name=\"l00022\"></a><span class=\"lineno\"> 22</span>  dex </div><div class=\"line\"><a name=\"l00023\"></a><span class=\"lineno\"> 23</span>  bpl LE830</div><div class=\"line\"><a name=\"l00024\"></a><span class=\"lineno\"> 24</span> LE83A </div><div class=\"line\"><a name=\"l00025\"></a><span class=\"lineno\"> 25</span>  ldx #$03 ; on va tracer 4 traits </div><div class=\"line\"><a name=\"l00026\"></a><span class=\"lineno\"> 26</span> LE83C</div><div class=\"line\"><a name=\"l00027\"></a><span class=\"lineno\"> 27</span>  stx DECDEB+1 ; dans $05 <----------------------------------------</div><div class=\"line\"><a name=\"l00028\"></a><span class=\"lineno\"> 28</span>  lda table_for_rect,X ; on lit le code coordonn?es I </div><div class=\"line\"><a name=\"l00029\"></a><span class=\"lineno\"> 29</span>  sta DECDEB ; dans $04 I</div><div class=\"line\"><a name=\"l00030\"></a><span class=\"lineno\"> 30</span>  ldx #$06 ; on va extraire 8 bits I</div><div class=\"line\"><a name=\"l00031\"></a><span class=\"lineno\"> 31</span> LE845</div><div class=\"line\"><a name=\"l00032\"></a><span class=\"lineno\"> 32</span>  lda #$00 ; A=0 <---------------------------------------- I</div><div class=\"line\"><a name=\"l00033\"></a><span class=\"lineno\"> 33</span>  sta HRS1+1,X ; poids fort HRSx ? 0 et positif I I</div><div class=\"line\"><a name=\"l00034\"></a><span class=\"lineno\"> 34</span>  lsr DECDEB ; on sort 2 bits I I</div><div class=\"line\"><a name=\"l00035\"></a><span class=\"lineno\"> 35</span>  rol ; dans A I I</div><div class=\"line\"><a name=\"l00036\"></a><span class=\"lineno\"> 36</span>  lsr DECDEB ; I I</div><div class=\"line\"><a name=\"l00037\"></a><span class=\"lineno\"> 37</span>  rol ; I I</div><div class=\"line\"><a name=\"l00038\"></a><span class=\"lineno\"> 38</span>  tay ; et Y I I</div><div class=\"line\"><a name=\"l00039\"></a><span class=\"lineno\"> 39</span>  lda $0006,Y ; on lit la coordonn?e correspondante I I</div><div class=\"line\"><a name=\"l00040\"></a><span class=\"lineno\"> 40</span>  sta HRS1,X ; et on stocke dans HRSx I I</div><div class=\"line\"><a name=\"l00041\"></a><span class=\"lineno\"> 41</span>  dex ; I I</div><div class=\"line\"><a name=\"l00042\"></a><span class=\"lineno\"> 42</span>  dex ; I I</div><div class=\"line\"><a name=\"l00043\"></a><span class=\"lineno\"> 43</span>  bpl LE845 ; on fait les 4 coordonn?es ADRAW ------------- I </div><div class=\"line\"><a name=\"l00044\"></a><span class=\"lineno\"> 44</span>  jsr XDRAWA_ROUTINE ; on trace le trait en absolu I </div><div class=\"line\"><a name=\"l00045\"></a><span class=\"lineno\"> 45</span>  ldx DECDEB+1 ; I</div><div class=\"line\"><a name=\"l00046\"></a><span class=\"lineno\"> 46</span>  dex ; I</div><div class=\"line\"><a name=\"l00047\"></a><span class=\"lineno\"> 47</span>  bpl LE83C ; et on fait 4 traits ------------------------------ </div><div class=\"line\"><a name=\"l00048\"></a><span class=\"lineno\"> 48</span>  rts </div><div class=\"line\"><a name=\"l00049\"></a><span class=\"lineno\"> 49</span> table_for_rect:</div><div class=\"line\"><a name=\"l00050\"></a><span class=\"lineno\"> 50</span>  .byt $26,$67,$73,$32</div></div><!-- fragment --></div><!-- contents -->\n<!-- start footer part -->\n<hr class=\"footer\"/><address class=\"footer\"><small>\nGenerated on Sat Apr 25 2020 11:13:32 for Orix : Kernel by  <a href=\"http://www.doxygen.org/index.html\">\n<img class=\"footer\" src=\"doxygen.png\" alt=\"doxygen\"/>\n</a> 1.8.13\n</small></address>\n</body>\n</html>\n"
},
{
"alpha_fraction": 0.6000000238418579,
"alphanum_fraction": 0.6000000238418579,
"avg_line_length": 11,
"blob_id": "c1433e716eaa39fca7003889a60a840b755d486f",
"content_id": "e9ba816ff5a0c4949010d163df3236cb2efd4a4c",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Markdown",
"length_bytes": 15,
"license_type": "no_license",
"max_line_length": 11,
"num_lines": 1,
"path": "/docs/kernel/primitives/xclosedir.md",
"repo_name": "orix-software/orix-software.github.io",
"src_encoding": "UTF-8",
"text": "# XCLOSEDIR\r\n\r\n"
},
{
"alpha_fraction": 0.5149068236351013,
"alphanum_fraction": 0.5409938097000122,
"avg_line_length": 17.941177368164062,
"blob_id": "a0ba563078f416ad97121690b49fc90efb3b5182",
"content_id": "afa71b0a726a7b2f6b9496bb9292b53581d0c4e9",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Markdown",
"length_bytes": 1610,
"license_type": "no_license",
"max_line_length": 159,
"num_lines": 85,
"path": "/doxygen/doxybook_output/Files/vi__scroll__from__left__to__right__full__line_8s.md",
"repo_name": "orix-software/orix-software.github.io",
"src_encoding": "UTF-8",
"text": "---\ntitle: /mnt/c/Users/plifp/OneDrive/oric/projets/orix-software/vi/src/functions/subfunc/vi/vi_scroll_from_left_to_right_full_line.s\n\n---\n\n# /mnt/c/Users/plifp/OneDrive/oric/projets/orix-software/vi/src/functions/subfunc/vi/vi_scroll_from_left_to_right_full_line.s\n\n\n\n## Functions\n\n| | Name |\n| -------------- | -------------- |\n| | **[vi_scroll_from_left_to_right_full_line](Files/vi__scroll__from__left__to__right__full__line_8s.md#function-vi-scroll-from-left-to-right-full-line)**() |\n\n\n## Functions Documentation\n\n### function vi_scroll_from_left_to_right_full_line\n\n```cpp\nvi_scroll_from_left_to_right_full_line()\n```\n\n\n\n\n## Source code\n\n```cpp\n.proc vi_scroll_from_left_to_right_full_line\n ; Scroll une ligne Y\n ; insert copy on screen\n ldy #vi_struct_data::xpos_screen ; Get X position\n lda (vi_struct),y ; get value\n sec\n sbc #$01\n sta vi_tmp1 ; Save It\n\n lda vi_ptr_file_used\n sta vi_ptr1\n\n lda vi_ptr_file_used+1\n sta vi_ptr1+1\n\n ; Compute number of char to scroll\n\n\n ldy #$00\n ldx #$00\n@compute:\n lda (vi_ptr1),y\n cmp #CR\n beq @end_compute\n\n inc vi_ptr1\n bne @S2\n inc vi_ptr1+1\n@S2:\n inc vi_ptr1\n inx\n bne @compute\n\n@end_compute:\n txa\n tay\n\n ldy #38\n@L2:\n lda (vi_ptr_screen),y\n iny\n sta (vi_ptr_screen),y\n dey\n dey\n cpy vi_tmp1\n bne @L2\n rts\n\n.endproc\n```\n\n\n-------------------------------\n\nUpdated on 2022-12-15 at 11:48:27 +0100\n"
},
{
"alpha_fraction": 0.5984655022621155,
"alphanum_fraction": 0.6035805344581604,
"avg_line_length": 11.964285850524902,
"blob_id": "8babbfa78279624ac8ed75ac8b544cd716a4cdd4",
"content_id": "280aeabdb5b46c8c33a6f7563fb8caa9d3185f70",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Markdown",
"length_bytes": 391,
"license_type": "no_license",
"max_line_length": 68,
"num_lines": 28,
"path": "/docs/developer_manual/orixsdk_macros/chdir.md",
"repo_name": "orix-software/orix-software.github.io",
"src_encoding": "UTF-8",
"text": "# CHDIR macro\r\n\r\n## Description\r\n\r\nChange currrent folder\r\n\r\n## usage\r\n\r\nchdir ptr\r\n\r\nnote:\r\n\r\n* ptr may be: (ptr), address\r\n* Call XPUTCWD function\r\n\r\n## Example\r\n\r\n```ca65\r\n .include \"telestrat.inc\"\r\n .include \"../orix-sdk/macros/SDK_dir.mac\"\r\n\r\n chdir myfolder\r\n rts\r\nmyfolder:\r\n .asciiz \"home\"\r\n```\r\n\r\nCall [XPUTCWD](../../../kernel/primitives/XPUTCWD/) kernel function.\r\n"
},
{
"alpha_fraction": 0.7200000286102295,
"alphanum_fraction": 0.7350000143051147,
"avg_line_length": 37.599998474121094,
"blob_id": "bb5ab3623447d5b36565b3eedb7341e4f6beba08",
"content_id": "fccc34c7eb1643e26c61d9eca5319fdc7da7af1c",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Markdown",
"length_bytes": 200,
"license_type": "no_license",
"max_line_length": 108,
"num_lines": 5,
"path": "/docs/developer_manual/buildman.md",
"repo_name": "orix-software/orix-software.github.io",
"src_encoding": "UTF-8",
"text": "# Manual pages\r\n\r\nman binary is available and you can build man pages from .md files\r\n\r\nYou need to use md2hlp program here : [https://github.com/assinie/md2hlp](https://github.com/assinie/md2hlp)\r\n\r\n"
},
{
"alpha_fraction": 0.48356807231903076,
"alphanum_fraction": 0.5273865461349487,
"avg_line_length": 13.199999809265137,
"blob_id": "c7838825d1e73fc2a33f743e81c1ce7d830fa79a",
"content_id": "1221841b5420ba2ec815b371c4cba7e11e4f2353",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Markdown",
"length_bytes": 639,
"license_type": "no_license",
"max_line_length": 101,
"num_lines": 45,
"path": "/docs/tools_docs/vi/Files/vi__clear__command__line_8s.md",
"repo_name": "orix-software/orix-software.github.io",
"src_encoding": "UTF-8",
"text": "---\ntitle: vi_clear_command_line.s\n\n---\n\n# vi_clear_command_line.s\n\n\n\n## Routine\n\n| | Name |\n| -------------- | -------------- |\n| | **[vi_clear_command_line](Files/vi__clear__command__line_8s.md#Routine-vi-clear-command-line)** |\n\n\n## Routine documentation\n\n### Routine vi_clear_command_line\n\n```ca65\nvi_clear_command_line\n```\n\n\n\n\n## Source code\n\n```ca65\n.proc vi_clear_command_line\n ldx #40\n lda #$00 ; Set command line with 0\n@loop:\n sta VI_COMMANDLINE_VIDEO_ADRESS,x\n dex\n bpl @loop\n rts\n.endproc\n```\n\n\n-------------------------------\n\nUpdated on 2022-12-15 at 14:20:17 +0100\n"
},
{
"alpha_fraction": 0.5359628796577454,
"alphanum_fraction": 0.5707656741142273,
"avg_line_length": 15.576923370361328,
"blob_id": "62eb976bed5776ec340b023e0587d951b53bc446",
"content_id": "d4fa9ff810fa24c373ad823b2e5968ce7b6211f5",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Markdown",
"length_bytes": 862,
"license_type": "no_license",
"max_line_length": 99,
"num_lines": 52,
"path": "/doxygen/doxybook_output/Files/vi__set__length__file_8s.md",
"repo_name": "orix-software/orix-software.github.io",
"src_encoding": "UTF-8",
"text": "---\ntitle: /mnt/c/Users/plifp/OneDrive/oric/projets/orix-software/vi/src/functions/vi_set_length_file.s\n\n---\n\n# /mnt/c/Users/plifp/OneDrive/oric/projets/orix-software/vi/src/functions/vi_set_length_file.s\n\n\n\n## Functions\n\n| | Name |\n| -------------- | -------------- |\n| | **[vi_set_length_file](Files/vi__set__length__file_8s.md#function-vi-set-length-file)**() |\n\n\n## Functions Documentation\n\n### function vi_set_length_file\n\n```cpp\nvi_set_length_file()\n```\n\n\n\n\n## Source code\n\n```cpp\n\n; A,X,Y RES contains the length\n\n.proc vi_set_length_file\n ; set file length A and X contains the value\n\n pha\n jsr populate_tmp0_16_with_ptr_length_file\n pla\n ldy #$00\n sta (tmp0_16),y\n txa\n iny\n sta (tmp0_16),y\n rts\n.endproc\n```\n\n\n-------------------------------\n\nUpdated on 2022-12-15 at 11:48:27 +0100\n"
},
{
"alpha_fraction": 0.6151960492134094,
"alphanum_fraction": 0.6348039507865906,
"avg_line_length": 20.054054260253906,
"blob_id": "b0dd0a78be798dc6f5df3b6ac2d0946d7083bdd7",
"content_id": "fd32c7329af7a468e72535131da0b4b4fdfce7dd",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Markdown",
"length_bytes": 816,
"license_type": "no_license",
"max_line_length": 108,
"num_lines": 37,
"path": "/docs/developer_manual/orixsdk_macros/scroll.md",
"repo_name": "orix-software/orix-software.github.io",
"src_encoding": "UTF-8",
"text": "# scroll macro\r\n\r\n## Description\r\n\r\nPerfoms a scroll text\r\n\r\n## Usage\r\n\r\nscroll [Direction],firstline, last_line_to_scroll\r\n\r\n## Example 1\r\n\r\n```ca65\r\n .include \"telestrat.inc\"\r\n ; Scroll all lines from bottom to the first line : it erases the first line with the second and so open0\r\n scroll up, 0, 26\r\n scroll down, 0, 26\r\n rts\r\n```\r\n\r\n## Example 2\r\n\r\nthe first line to scroll is computed and is in X register\r\n\r\n```ca65\r\n .include \"telestrat.inc\"\r\n .include \"../orix-sdk/macros/SDK_display.mac\"\r\n\r\n ; The second arg is empty because we notice to the macro that it's provided by X register\r\n ldx #3\r\n scroll up,, 26\r\n rts\r\n```\r\n\r\n!!! tip \"This only work with X register\"\r\n\r\nCall [XSCROB](../../../kernel/primitives/xscrob/) routine or [XSCROH](../../../kernel/primitives/xscroh/)\r\n"
},
{
"alpha_fraction": 0.6980568170547485,
"alphanum_fraction": 0.7080602645874023,
"avg_line_length": 39.42380905151367,
"blob_id": "e05c291f8292d127b408d1f7b3fa097cae6b574b",
"content_id": "68fca0612d85df82d490419dbc112502a93339ad",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Markdown",
"length_bytes": 8969,
"license_type": "no_license",
"max_line_length": 907,
"num_lines": 210,
"path": "/docs/tutorials/autoboot_fr.md",
"repo_name": "orix-software/orix-software.github.io",
"src_encoding": "UTF-8",
"text": "# [Orix] Démarrage personnalisé par autoboot et submit\r\n\r\nDepuis la version du kernel v2023.1, il est désormais possible de personnaliser le démarrage d’Orix sur son atmos.\r\n\r\nSubmit est un langage de scripting issu de CPM mais modifié par Assinie. Assine a fait le code pour Orix mais aussi a amélioré le fonctionnement afin d’intégrer des fonctionnalités en plus. Submit est un langage interprété.\r\n\r\nAutoboot est un fichier localisé dans le répertoire “/etc/” et va contenir un script submit. Ce script contenu dans le fichier autoboot sera lancé au boot de l’ordinateur ce qui permettra de personnaliser son boot, faire des menus etc.\r\n\r\nSi le fichier autoboot est présent, alors il sera démarré, sinon, cela démarrera normalement sans personnalisation. S’il y a besoin de bypasser ce démarrage par autoboot, il faudra faire un reset par exemple avec une touche quelconque appuyée. Cela va empêcher autoboot de démarrer et permettre d’accéder au shell directement.\r\n\r\nLe gros avantage du scripting, c’est que cela permet d’aller vite pour faire quelque chose sans avoir beaucoup de connaissances.\r\n\r\nVoici, donc, un tutorial qui va mettre en application un démarrage par autoboot.\r\n\r\n## Prerequis\r\n\r\nAvoir la twilighte board à jour avec le kernel v2023.1\r\nsubmit version v2022.4 (vérifier avec ‘submit’)\r\nun éditeur texte sur pc. Il y a un éditeur texte natif en cours de dev, mais il n’est pas finalisé, et donc, il faudra partir sur la clé usb branchée sur le pc, avec “/etc/autoboot” ouvert.\r\navoir tous les binaires Orix à jour : https://repo.orix.oric.org/dists/official/ sur la clé usb\r\nPour demander au système de ne pas lire le fichier autoboot au démarrage, appuyer sur une touche au boot. Si autoboot doit être désactivé, il suffit d’aller dans /etc/, faire une copie d’autoboot avec ‘cp’, et supprimer le fichier autoboot\r\n\r\nJe veux démarrer blake’s 7 au démarrage\r\nDans le script submit, je mets juste :\r\n\r\n```bash\r\n#!/bin/submit\r\nblakes7\r\n```\r\n\r\nJe sauve le fichier, je mets la clé usb, et je démarre l’oric.\r\n\r\nBlakes7 démarrera automatiquement\r\n\r\n## Démarrage d’un menu personnalisé avec changement de fonte\r\n\r\nCette fois ci, nous allons lancer un menu au boot. Le script suivant va :\r\n\r\neffacer l’écran après le boot, mettre la fonte fanta2uk (les fontes sont localisées dans “/usr/share/fonts”).\r\nafficher un menu avec des entrées “basic, blake’s 7, Psychiatric, barbitoric, et Orix” : ces choix sont volontaires car ils permettent de montrer les différents lancements, que cela soit à partir d’une rom, du storage, d’un jeu, ou d’une démo). La commande basic11 prend un argument qui est le nom du .tap à lancer (on peut le trouver en faisant un basic11 -l)\r\nattendre une touche et lancer le programme sélectionné.\r\n\r\n```bash\r\n#! /bin/submit\r\ncls\r\nsetfont fanta2uk\r\n\r\n@ 10,0\r\ntext\r\n +--------------+\r\n | 1 Basic |\r\n | 2 Blake's 7 |\r\n | 3 Psychatric |\r\n | 4 Barbitoric |\r\n | 5 Orix |\r\n +--------------+\r\nendtext\r\n:start\r\ngetkey\r\nif key = 49 goto _basic\r\nif key = 50 goto _blake\r\nif key = 51 goto _psy\r\nif key = 52 goto _barbitoric\r\nif key = 53 goto prompt\r\n\r\ngoto start\r\n\r\n:_basic\r\nbasic11\r\n\r\n:_blake\r\nblakes7\r\n\r\n:_barbitoric\r\nbarboric\r\n\r\n:_psy\r\nbasic11 \"PSY\r\n\r\n:prompt\r\nexit\r\n\r\n```\r\n\r\nLe résultat en image:\r\n\r\n\r\nLe résultat en vidéo:\r\n\r\n\r\nCe script va lancer, en fonction de la touche pressée, l’entrée voulue (rom basic, blake, psychiatric, barbitoric ou revenir au shell). En revanche, une entrée de fonctionnera pas, c’est barbitoric (pour la version disponible au 7/06/2023).\r\n\r\nPour expliquer pourquoi barbitoric ne fonctionne pas dans le cas où il est localisé dans un script submit, il faut se référer au fonctionnement d’un binaire Orix. Le problème ne se produit pas pour une commande en ROM, mais il peut se produire pour un binaire sur clé usb/sdcard.\r\n\r\nAu début du développement d’Orix, un seul format de binaire existait, c’est le fameux format statique que nous connaissons pour du basic, hyperbasic, commandes sedoric etc. Les programmes sont assemblés à une adresse fixe, et il y a conflit si on essaie de charger un programme qui est dans la même plage d’adresse : l’un va écraser l’autre. C’est ce qu’il se passe dans le cas de submit et barbitoric, mais ce n’est pas submit le fautif, c’est le binaire barbitoric en natif Orix. Submit est un binaire sur disque au format 2 Orix. Ce format est relogeable. Le kernel va allouer la plage mémoire disponible en fonction de la taille et reloger au runtime le binaire pour que submit puisse tourner à une nouvelle adresse. Submit tourne donc à une adresse inconnue par l’utilisateur, seul le kernel sait où submit est localisé en RAM. Aussi, Submit va faire des allocations mémoire pour sa propre utilisation.\r\n\r\nIci, barbitoric n’est pas au format relogeable, et quand le kernel va essayer de le lancer, il va regarder s’il est au format 2, s’il est au format 1, le header du binaire définit une adresse fixe. Le kernel va vérifier si l’adresse est occupée. Comme submit a été lancé de manière relogée au 1er offset mémoire libre, il y a conflit et le kernel va renvoyer un “exec format error” qui sera affiché dans submit.\r\n\r\nVoici comment vérifier si un binaire est au format relogeable : Il faut utiliser le binaire file.\r\n\r\nVous verrez que file n’indique pas que c’est un binaire relogeable pour barboric (nom du programme barbitoric) :\r\n\r\n```bash\r\n/#cd bin\r\n/bin#file barboric\r\n```\r\n\r\nCi dessous, en rouge, le test sur le binaire barbitoric, en vert le même test sur le programme blakes7. Nous voyons bien dans ce cas “reloc binary” en vert\r\n\r\nPour corriger le problème de barbitoric dans le cas de submit, il faut attendre la sortie de la version relogeable. Pour cela, le binaire est converti par un programme après un 1er build du code source.\r\n\r\nLes commandes en ROM n’ont pas besoin d’être relogées. Elles sont déjà résidentes en ROM, et ont été conçues pour ne pas écraser les autres commandes. Les commandes sont déjà dans des emplacements mémoire fixes, et gèrent les allocations mémoire de leur coté. De façon générale, les commandes en ROM sont plus rapides car elles sont déjà chargées, n’ont pas besoin d’être relogées et n’occupent pas de RAM dans les 48KB pour son propre code (à part pour les allocations mémoires nécessaires)\r\n\r\nCouleurs, caractères de contrôles\r\nAvant d’aller plus loin, l’idéal est de lire la doc “subdoc” en utilisant man. C’est une doc en français qui explique les possibilités de submit\r\n\r\n```bash\r\n/#man subdoc\r\n```\r\n\r\nNous avons besoin de personnaliser un peu plus ce menu, et nous allons afficher un texte double hauteur avec quelques couleurs. Cela se fait avec les caractères de contrôle :\r\n```bash\r\nink_black = ^@\r\nink_red = ^A\r\nink_green = ^B\r\nink_yellow = ^C\r\nink_blue = ^D\r\nink_purple = ^E\r\nink_cyan = ^F\r\nink_white = ^G\r\n\r\nsimple_height = ^H\r\nsimple_graph = ^I\r\ndouble_height = ^J\r\nblink_simple = ^L\r\nblink_double = ^N\r\n\r\npaper_black = ^P\r\npaper_red = ^Q\r\npaper_green = ^R\r\npaper_yellow = ^S\r\npaper_blue = ^T\r\npaper_purple = ^U\r\npaper_cyan = ^V\r\npaper_white = ^W\r\n```\r\n\r\nNous allons modifier autoboot, pour afficher un bandeau avec sur fond bleu et en écriture rouge le mot menu en double hauteur, puis afficher un encadré magenta avec le menu.\r\n\r\n```bash\r\n#! /bin/submit\r\ncls\r\nsetfont fanta2uk\r\necho ^[T +-----------------------------------+\r\necho ^[T^[J|^[A Menu ^[G |\r\necho ^[T^[J|^[A Menu ^[G |\r\necho ^[T +-----------------------------------+\r\n\r\necho\r\n\r\necho ^[U\r\necho ^[U ^[P 1 Basic ^[U\r\necho ^[U ^[P 2 Blake's 7 ^[U\r\necho ^[U ^[P 3 Psychatric ^[U\r\necho ^[U ^[P 4 Barbitoric ^[U\r\necho ^[U ^[P 5 Orix ^[U\r\necho ^[U\r\n\r\n:start\r\ngetkey\r\nif key = 49 goto _basic\r\nif key = 50 goto _blake\r\nif key = 51 goto _psy\r\nif key = 52 goto _barbitoric\r\nif key = 53 goto prompt\r\n\r\ngoto start\r\n\r\n:_basic\r\nbasic11\r\n\r\n:_blake\r\nblakes7\r\n\r\n:_barbitoric\r\nbarboric\r\n\r\n:_psy\r\nbasic11 \"PSY\r\n\r\n:prompt\r\nexit\r\nLe résultat\r\n```\r\n\r\n## Pour aller plus loin\r\n\r\nSubmit gère des variables, il est possible de les définir et de les sauver dans un fichier texte (save to myfile) et les restaurer. (restore from myfile)\r\n\r\nSubmit lit des arguments en paramètre. Ainsi, un script en ligne de commande lancé par submit tel que\r\n\r\n```bash\r\n/#submit monscript.sub toto\r\n```\r\n\r\n“toto” sera dans la variable $1, permettant de faire des tests avec if.\r\n\r\nIl est possible de tester la présence d’un fichier (if exist myfile.txt goto start)\r\n\r\nD’afficher un prompt avec input, et récupérer ce qui a été saisi dans une variable."
},
{
"alpha_fraction": 0.45696067810058594,
"alphanum_fraction": 0.49309244751930237,
"avg_line_length": 13.703125,
"blob_id": "adba1bd26360ce7e836ab664a4db2f57e3cf73eb",
"content_id": "3fa439b998f81edda192fed17b89c05a4483ca04",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Markdown",
"length_bytes": 1882,
"license_type": "no_license",
"max_line_length": 114,
"num_lines": 128,
"path": "/docs/tools_docs/vi/Files/vi__compute__last__text__line_8s.md",
"repo_name": "orix-software/orix-software.github.io",
"src_encoding": "UTF-8",
"text": "---\ntitle: vi_compute_last_text_line.s\n\n---\n\n# vi_compute_last_text_line.s\n\n\n\n## Routine\n\n| | Name |\n| -------------- | -------------- |\n| | **[vi_compute_last_text_line](Files/vi__compute__last__text__line_8s.md#Routine-vi-compute-last-text-line)** |\n\n\n## Routine documentation\n\n### Routine vi_compute_last_text_line\n\n```ca65\nvi_compute_last_text_line\n```\n\n\n\n\n## Source code\n\n```ca65\n.proc vi_compute_last_text_line\n ; A & Y the current ptr\n ; X the id of the text line ?\n sta vi_ptr2\n sty vi_ptr2+1\n\n stx vi_tmp2\n\n lda #VI_LAST_LINE_EDITOR\n sec\n sbc vi_tmp2\n sta vi_tmp2\n\n\n@L1:\n\n\n jsr check_eof_vi_ptr2\n cmp #IS_EOF\n beq @exit\n ldy #$00\n lda (vi_ptr2),y\n cmp #CR\n beq @found\n\n@continue:\n inc vi_ptr2\n bne @out_compare\n inc vi_ptr2+1\n@out_compare:\n jmp @L1\n\n@found:\n\n inc vi_ptr2\n bne @out_compare2\n inc vi_ptr2+1\n@out_compare2:\n\n jsr check_eof_vi_ptr2\n cmp #IS_EOF\n beq @exit\n\n lda (vi_ptr2),y\n cmp #LF\n bne @S1\n\n inc vi_ptr2\n bne @out_compare3\n inc vi_ptr2+1\n@out_compare3:\n\n jsr check_eof_vi_ptr2\n cmp #IS_EOF\n beq @exit\n\n\n@S1:\n dec vi_tmp2\n bne @continue\n@exit:\n lda vi_ptr2\n ldy vi_ptr2+1\n\n rts\n\n\ncheck_eof_vi_ptr2:\n ldy #vi_struct_data::ptr_last_char_file\n lda (vi_struct),y\n sta vi_tmp1\n\n lda vi_ptr2\n cmp vi_tmp1\n bne @not_eof\n\n ldy #vi_struct_data::ptr_last_char_file+1\n lda (vi_struct),y\n sta vi_tmp1\n\n lda vi_ptr2+1\n cmp vi_tmp1\n bne @not_eof\n\n lda #IS_EOF\n rts\n\n@not_eof:\n lda #$01\n rts\n\n.endproc\n```\n\n\n-------------------------------\n\nUpdated on 2022-12-15 at 14:20:17 +0100\n"
},
{
"alpha_fraction": 0.4545454680919647,
"alphanum_fraction": 0.4545454680919647,
"avg_line_length": 7,
"blob_id": "573191bd985c686597181e37ec11bd4d613adbea",
"content_id": "2812be16bda804bb14b30aec3945b810f906b6de",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Markdown",
"length_bytes": 11,
"license_type": "no_license",
"max_line_length": 7,
"num_lines": 1,
"path": "/kernel/docs/primitives/xopen.md",
"repo_name": "orix-software/orix-software.github.io",
"src_encoding": "UTF-8",
"text": "# XOPEN\r\n\r\n"
},
{
"alpha_fraction": 0.5335254669189453,
"alphanum_fraction": 0.5542747378349304,
"avg_line_length": 16.94827651977539,
"blob_id": "ab3f0063efbfe9e32d474ae5c32dd853d5bf0c49",
"content_id": "0ad7a177f14222c4976f3aaaa0499d6b26810fce",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Markdown",
"length_bytes": 5205,
"license_type": "no_license",
"max_line_length": 101,
"num_lines": 290,
"path": "/docs/tools_docs/vi/Files/vi__key__del_8s.md",
"repo_name": "orix-software/orix-software.github.io",
"src_encoding": "UTF-8",
"text": "---\ntitle: vi_key_del.s\n\n---\n\n# vi_key_del.s\n\n\n\n## Routine\n\n| | Name |\n| -------------- | -------------- |\n| | **[vi_manage_del](Files/vi__key__del_8s.md#Routine-vi-manage-del)** |\n| | **[vi_remove_char_in_text_file](Files/vi__key__del_8s.md#Routine-vi-remove-char-in-text-file)** |\n| | **[vi_decal_text_left_to_right](Files/vi__key__del_8s.md#Routine-vi-decal-text-left-to-right)** |\n\n\n## Routine documentation\n\n### Routine vi_manage_del\n\n```ca65\nvi_manage_del\n```\n\n\n### Routine vi_remove_char_in_text_file\n\n```ca65\nvi_remove_char_in_text_file\n```\n\n\n### Routine vi_decal_text_left_to_right\n\n```ca65\nvi_decal_text_left_to_right\n```\n\n\n\n\n## Source code\n\n```ca65\n.proc vi_manage_del\n ; Check if we are at the beginning of the file\n\n jsr vi_check_beginning_of_file\n cmp #IS_BEGINNING_OF_THE_FILE\n bne @delete_char\n rts\n\n@delete_char:\n jsr vi_editor_switch_off_cursor\n\n ; if xpos==0 then goto @first_colomn else goto @not_first_column\n ldy #vi_struct_data::xpos_screen\n lda (vi_struct),y\n cmp #$00 ; First column ?\n bne @not_first_column\n ; First column\n@first_colomn:\n ; Does a $0D under the cursor (X=0 and current_char==0x0D ?) yes, scroll and remove the char\n ; if file[posfile]=='\\n' then goto\n\n\n ldy #$01\n lda (vi_ptr_file_used),y\n cmp #LF\n bne @remove_char_from_the_first_column\n ; remove 0A\n\n jsr vi_remove_char_in_text_file\n\n ldy #$00\n lda (vi_ptr_file_used),y\n cmp #CR\n bne @do_not_move_twice\n\n ; for $0A\n jsr vi_remove_char_in_text_file\n\n ldy #$00\n lda (vi_ptr_file_used),y\n cmp #CR\n bne @check_0A\n jsr vi_ptr_file_used_plus_plus\n\n@check_0A:\n\n ldy #$00\n lda (vi_ptr_file_used),y\n cmp #LF\n bne @not_0A\n jsr vi_ptr_file_used_plus_plus\n\n@not_0A:\n\n\n@do_not_move_twice:\n\n ldy #vi_struct_data::ypos_screen\n lda (vi_struct),y\n tax\n ;inx\n scroll up,,26 ; Because the second arg is provided\n ;jsr vi_ypos_screen_sub_sub\n\n\n ldy #vi_struct_data::ypos_screen\n lda (vi_struct),y\n tax\n inx\n lda vi_ptr_file_used\n ldy vi_ptr_file_used+1\n\n\n jsr vi_compute_last_text_line\n jsr vi_fill_last_line\n rts\n\n@not_first_column:\n tay\n jsr vi_scroll_to_left\n\n jsr vi_xpos_screen_sub_sub\n\n@dec_ptr_and_move_file:\n jsr vi_remove_char_in_text_file\n\n@out:\n rts\n\n\n@remove_char_from_the_first_column:\n ; For 0A\n\n\n ; Editor management part\n\n ; search the last char of the previous line to concat\n\n lda vi_ptr_file_used\n ldy vi_ptr_file_used+1\n\n jsr vi_search_previous_cr ; Search the previous \\n\n\n ; Don't put anything here, a and Y are kept for next vi_search_previous_cr\n\n\n jsr vi_search_previous_cr ; Search the first previous line \\n\n\n stx vi_tmp3 ; Save the number of char found\n\n jsr vi_remove_char_in_text_file\n\n ; For 0D\n jsr vi_remove_char_in_text_file\n\n jsr vi_ypos_screen_sub_sub\n\n jsr vi_compute_video_adress\n\n lda vi_ptr_file_used\n sta vi_ptr1\n\n lda vi_ptr_file_used+1\n sta vi_ptr1+1\n\n@L1:\n ldy #$00\n lda (vi_ptr1),y\n cmp #CR\n beq @exit\n ldy vi_tmp3\n\n sta (vi_ptr_screen),y\n\n inc vi_ptr_screen\n bne @out_compare\n inc vi_ptr_screen+1\n@out_compare:\n\n\n inc vi_ptr1 ; 98\n bne @out_compare2\n inc vi_ptr1+1\n@out_compare2:\n\n jmp @L1\n\n\n@exit:\n lda vi_tmp3\n jsr vi_set_xpos_from_A\n\n ldy #vi_struct_data::ypos_screen\n lda (vi_struct),y\n tax\n inx\n scroll up,,26 ; Because the second empty arg is provided\n\n rts\n\n\n\n ; And of previous empty line management\n\n@not_an_empty_line:\n jsr vi_ypos_screen_sub_sub\n\n ldy #vi_struct_data::ypos_screen\n lda (vi_struct),y\n tax\n\n scroll up,,26 ; Because the second empty arg is provided\n\n rts\n\n\n.endproc\n\n.proc vi_remove_char_in_text_file\n ; Remove a char in the text file in the current position\n jsr vi_ptr_file_used_sub_sub\n jsr vi_ptr_last_char_sub_sub\n jsr vi_decal_text_left_to_right\n jsr vi_length_file_sub_sub\n rts\n.endproc\n\n.proc vi_decal_text_left_to_right\n lda vi_ptr_file_used\n sta vi_ptr1\n lda vi_ptr_file_used+1\n sta vi_ptr1+1\n\n@restart:\n ldy #vi_struct_data::ptr_last_char_file\n lda (vi_struct),y\n sta vi_tmp1\n\n lda vi_ptr1\n cmp vi_tmp1\n bne @not_eof\n\n ldy #vi_struct_data::ptr_last_char_file+1\n lda (vi_struct),y\n sta vi_tmp1\n\n lda vi_ptr1+1\n cmp vi_tmp1\n bne @not_eof\n\n ldy #$01\n lda (vi_ptr1),y\n dey\n sta (vi_ptr1),y\n\n inc vi_ptr1\n bne @out2\n inc vi_ptr1+1\n@out2:\n\n rts\n\n\n@not_eof:\n ldy #$01\n lda (vi_ptr1),y\n dey\n sta (vi_ptr1),y\n\n inc vi_ptr1\n bne @out\n inc vi_ptr1+1\n@out:\n jmp @restart\n\n\n rts\n.endproc\n```\n\n\n-------------------------------\n\nUpdated on 2022-12-15 at 14:20:17 +0100\n"
},
{
"alpha_fraction": 0.6058394312858582,
"alphanum_fraction": 0.6149635314941406,
"avg_line_length": 15.677419662475586,
"blob_id": "7cd3e4704a05ef67578cca3c6ea89bf5f27006c2",
"content_id": "e5621d5abd2e4c99de6f2d3325e6bd461e8446b3",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Markdown",
"length_bytes": 548,
"license_type": "no_license",
"max_line_length": 113,
"num_lines": 31,
"path": "/docs/kernel/primitives/xmkdir.md",
"repo_name": "orix-software/orix-software.github.io",
"src_encoding": "UTF-8",
"text": "# XMKDIR (mkdir)\r\n\r\nID primitive : $4B\r\n\r\n## Description\r\n\r\nCreate a folder. But it does not support absolute path for instance. It can only create path in the current path.\r\n\r\n## Input\r\n\r\nA and Y : ptr of the string\r\n\r\n## Output\r\n\r\nReturn #ENODEV if the device can't be mount\r\n\r\n## Usage\r\n\r\n```ca65\r\n .include \"telestrat.inc\"\r\n\r\n lda #<str\r\n ldy #>str\r\n ldx #$00\r\n BRK_TELEMON XMKDIR\r\n rts\r\nstr:\r\n .asciiz \"myfolder\"\r\n```\r\n\r\n!!! tip \"See [mkdir](../../../developer_manual/orixsdk_macros/mkdir) macro from orix-sdk to use it\"\r\n"
},
{
"alpha_fraction": 0.49432533979415894,
"alphanum_fraction": 0.62211012840271,
"avg_line_length": 41.46428680419922,
"blob_id": "10d6d3c16680fa4a1e9f145c9006f08d55ee5b92",
"content_id": "1bb478e5d37b0b0163d833583fbf2500b8ba6aa3",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Markdown",
"length_bytes": 2379,
"license_type": "no_license",
"max_line_length": 47,
"num_lines": 56,
"path": "/docs/commands/all.md",
"repo_name": "orix-software/orix-software.github.io",
"src_encoding": "UTF-8",
"text": "* [asm2k2](../asm2k2) Last version : 2021.1\n* [bank](../bank) Last version : 2023.1\n* [basic10](../basic10) Last version : 2023.1\n* [basic11](../basic11) Last version : 2023.1\n* [barboric](../barboric) Last version : 2022.1\n* [blakes7](../blakes7) Last version : 2022.4\n* [bootfd](../bootfd) Last version : 2021.1\n* [born1983](../born1983) Last version : 2022.1\n* [cat](../cat) Last version : 2023.1\n* [cd](../cd) Last version : 2023.1\n* [cksum](../cksum) Last version : 2023.2\n* [clear](../clear) Last version : 2023.1\n* [cp](../cp) Last version : 2023.1\n* [df](../df) Last version : 2023.1\n* [otimer](../otimer) Last version : 2023.1\n* [dsk-util](../dsk-util) Last version : 2023.2\n* [echo](../echo) Last version : 2023.1\n* [env](../env) Last version : 2023.1\n* [forth](../forth) Last version : 2020.1\n* [ftdos](../ftdos) Last version : 2022.3\n* [grep](../grep) Last version : 2022.2\n* [help](../help) Last version : 2023.1\n* [hexdump](../hexdump) Last version : 2023.2\n* [ioports](../ioports) Last version : 2023.1\n* [list](../list) Last version : 2023.2\n* [ls](../ls) Last version : 2023.1\n* [lscpu](../lscpu) Last version : 2023.1\n* [lsmem](../lsmem) Last version : 2023.1\n* [loader](../loader) Last version : 2022.3\n* [man](../man) Last version : 2023.1\n* [mkdir](../mkdir) Last version : 2023.1\n* [mount](../mount) Last version : 2023.1\n* [more](../more) Last version : 2023.2\n* [orixcfg](../orixcfg) Last version : 2023.1\n* [pwd](../pwd) Last version : 2023.1\n* [ps](../ps) Last version : 2023.1\n* [quintes](../quintes) Last version : 2022.1\n* [raw2dsk](../raw2dsk) Last version : 2023.1\n* [readdsk](../readdsk) Last version : 2023.1\r\n* [reboot](../reboot) Last version : 2023.1\n* [rm](../rm) Last version : 2023.1\n* [setfont](../setfont) Last version : 2023.1\n* [loader](../loader) Last version : 2022.3\n* [twilconf](../twilconf) Last version : 2022.3\n* [strerr](../strerr) Last version : 2023.2\n* [sh](../sh) Last version : 2023.1\n* [submit](../submit) Last version : 2022.2\n* [touch](../touch) Last version : 2023.1\n* [twil](../twil) Last version : 2023.1\n* [twiload](../twiload) Last version : 2022.3\n* [uname](../uname) Last version : 2023.1\n* [untar](../untar) Last version : 2022.2\n* [vidplay](../vidplay) Last version : 2022.3\n* [viewscr](../viewscr) Last version : 2023.2\n* [viewhrs](../viewhrs) Last version : 2023.1\n* [zerofx](../zerofx) Last version : 2022.2\n"
},
{
"alpha_fraction": 0.5674470663070679,
"alphanum_fraction": 0.5897436141967773,
"avg_line_length": 18.5,
"blob_id": "42a8218714e61f6bb45e5128a5f3cfdb942ce39f",
"content_id": "aafb99590cf48dc3d93cb4f3e1efaf660c0f7b06",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Markdown",
"length_bytes": 897,
"license_type": "no_license",
"max_line_length": 131,
"num_lines": 46,
"path": "/doxygen/doxybook_output/Files/vi__vi__ptr__file__used__plus__plus_8s.md",
"repo_name": "orix-software/orix-software.github.io",
"src_encoding": "UTF-8",
"text": "---\ntitle: /mnt/c/Users/plifp/OneDrive/oric/projets/orix-software/vi/src/functions/subfunc/vi/vi_vi_ptr_file_used_plus_plus.s\n\n---\n\n# /mnt/c/Users/plifp/OneDrive/oric/projets/orix-software/vi/src/functions/subfunc/vi/vi_vi_ptr_file_used_plus_plus.s\n\n\n\n## Functions\n\n| | Name |\n| -------------- | -------------- |\n| | **[vi_vi_ptr_file_used_plus_plus](Files/vi__vi__ptr__file__used__plus__plus_8s.md#function-vi-vi-ptr-file-used-plus-plus)**() |\n\n\n## Functions Documentation\n\n### function vi_vi_ptr_file_used_plus_plus\n\n```cpp\nvi_vi_ptr_file_used_plus_plus()\n```\n\n\n\n\n## Source code\n\n```cpp\n.proc vi_vi_ptr_file_used_plus_plus\n ; A the char to add\n ; Insert into file (memory) char\n inc vi_ptr_file_used\n bne @no_inc\n inc vi_ptr_file_used+1\n@no_inc:\n\n rts\n.endproc\n```\n\n\n-------------------------------\n\nUpdated on 2022-12-15 at 11:48:27 +0100\n"
},
{
"alpha_fraction": 0.5209302306175232,
"alphanum_fraction": 0.5476744174957275,
"avg_line_length": 15.226414680480957,
"blob_id": "5ba9b3b01eabb52f11735503690c0cd1dfda75e5",
"content_id": "9af9396f1968b3aefa80a32db30de72cad818174",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Markdown",
"length_bytes": 860,
"license_type": "no_license",
"max_line_length": 109,
"num_lines": 53,
"path": "/doxygen/doxybook_output/Files/vi__scroll__to__left_8s.md",
"repo_name": "orix-software/orix-software.github.io",
"src_encoding": "UTF-8",
"text": "---\ntitle: /mnt/c/Users/plifp/OneDrive/oric/projets/orix-software/vi/src/functions/subfunc/vi/vi_scroll_to_left.s\n\n---\n\n# /mnt/c/Users/plifp/OneDrive/oric/projets/orix-software/vi/src/functions/subfunc/vi/vi_scroll_to_left.s\n\n\n\n## Functions\n\n| | Name |\n| -------------- | -------------- |\n| | **[vi_scroll_to_left](Files/vi__scroll__to__left_8s.md#function-vi-scroll-to-left)**() |\n\n\n## Functions Documentation\n\n### function vi_scroll_to_left\n\n```cpp\nvi_scroll_to_left()\n```\n\n\n\n\n## Source code\n\n```cpp\n.proc vi_scroll_to_left\n pha\n\n ldy #vi_struct_data::xpos_screen\n lda (vi_struct),y\n tay\n@L3:\n lda (vi_ptr_screen),y\n dey\n sta (vi_ptr_screen),y\n iny\n iny\n cpy #39\n bne @L3\n pla\n rts\n.endproc\n```\n\n\n-------------------------------\n\nUpdated on 2022-12-15 at 11:48:27 +0100\n"
},
{
"alpha_fraction": 0.529644250869751,
"alphanum_fraction": 0.5533596873283386,
"avg_line_length": 16.88888931274414,
"blob_id": "529cc0a88a4662621968a9a99970a06714aa950c",
"content_id": "c5baaa79833af14940efd3886df34a8e10d9bd7a",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Markdown",
"length_bytes": 1771,
"license_type": "no_license",
"max_line_length": 114,
"num_lines": 99,
"path": "/doxygen/doxybook_output/Files/vi__display__file__opened_8s.md",
"repo_name": "orix-software/orix-software.github.io",
"src_encoding": "UTF-8",
"text": "---\ntitle: /mnt/c/Users/plifp/OneDrive/oric/projets/orix-software/vi/src/functions/subfunc/vi/vi_display_file_opened.s\n\n---\n\n# /mnt/c/Users/plifp/OneDrive/oric/projets/orix-software/vi/src/functions/subfunc/vi/vi_display_file_opened.s\n\n\n\n## Functions\n\n| | Name |\n| -------------- | -------------- |\n| | **[vi_display_file_opened](Files/vi__display__file__opened_8s.md#function-vi-display-file-opened)**() |\n\n\n## Functions Documentation\n\n### function vi_display_file_opened\n\n```cpp\nvi_display_file_opened()\n```\n\n\n\n\n## Source code\n\n```cpp\n.proc vi_display_file_opened\n\n@loop:\n\n ; Do we reached eof ?\n ldy #vi_struct_data::ptr_last_char_file\n lda (vi_struct),y\n sta vi_tmp1\n\n lda vi_ptr1\n cmp vi_tmp1\n bne @not_eof\n\n ldy #vi_struct_data::ptr_last_char_file+1\n lda (vi_struct),y\n sta vi_tmp1\n\n lda vi_ptr1+1\n cmp vi_tmp1\n bne @not_eof\n\n rts\n\n@not_eof:\n ldy #$00\n lda (vi_ptr1),y\n\n ldx #VI_FILL_SCREEN_MODE_STOP_AT_THE_END_OF_LAST_LINE\n jsr vi_put_char\n\n ldy #vi_struct_data::ypos_screen\n lda (vi_struct),y\n cmp #VI_LAST_LINE_EDITOR\n beq @compute_empty_line\n\n inc vi_ptr1\n bne @S30\n inc vi_ptr1+1\n@S30:\n\n jmp @loop\n\n@do_not_add_high_to_ptr:\n\n lda vi_length_file_compute\n bne @dec_low\n dec vi_length_file_compute+1\n\n@dec_low:\n dec vi_length_file_compute\n bne @loop\n lda vi_length_file_compute+1\n bne @loop\n\n\n@compute_empty_line:\n ; display last line\n lda vi_ptr1\n ldy vi_ptr1+1\n jsr vi_fill_last_line\n rts\n\n.endproc\n```\n\n\n-------------------------------\n\nUpdated on 2022-12-15 at 11:48:27 +0100\n"
},
{
"alpha_fraction": 0.5090726017951965,
"alphanum_fraction": 0.5292338728904724,
"avg_line_length": 18.45098114013672,
"blob_id": "ef85e3f52538aec6293a37d48bb68377af9769d1",
"content_id": "be75587ed23016b6af77ea5de4b23b7e2ecc0f01",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Markdown",
"length_bytes": 992,
"license_type": "no_license",
"max_line_length": 128,
"num_lines": 51,
"path": "/doxygen/doxybook_output/Files/__clrscr__vi_8s.md",
"repo_name": "orix-software/orix-software.github.io",
"src_encoding": "UTF-8",
"text": "---\ntitle: /mnt/c/Users/plifp/OneDrive/oric/projets/orix-software/vi/src/functions/_clrscr_vi.s\n\n---\n\n# /mnt/c/Users/plifp/OneDrive/oric/projets/orix-software/vi/src/functions/_clrscr_vi.s\n\n\n\n## Functions\n\n| | Name |\n| -------------- | -------------- |\n| | **[_clrscr_vi](Files/__clrscr__vi_8s.md#function--clrscr-vi)**() |\n\n\n## Functions Documentation\n\n### function _clrscr_vi\n\n```cpp\n_clrscr_vi()\n```\n\n\n\n\n## Source code\n\n```cpp\n.proc _clrscr_vi\n ; Switch to text mode\n BRK_TELEMON(XTEXT)\n\n lda #<SCREEN ; Get position screen\n ldy #>SCREEN\n sta RES\n sty RES+1\n\n ldy #<(SCREEN+SCREEN_XSIZE*SCREEN_YSIZE)\n ldx #>(SCREEN+SCREEN_XSIZE*SCREEN_YSIZE)\n lda #' '\n BRK_TELEMON XFILLM ; Calls XFILLM : it fills A value from RES address and size of X and Y value\n rts\n.endproc\n```\n\n\n-------------------------------\n\nUpdated on 2022-12-15 at 11:48:27 +0100\n"
},
{
"alpha_fraction": 0.4517304301261902,
"alphanum_fraction": 0.4936247766017914,
"avg_line_length": 12.390243530273438,
"blob_id": "1e69fcce567101c076ec239fe27d04cfb419c0ae",
"content_id": "8fc0407f8577c172bb87a3057b3aaea788a137f2",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Markdown",
"length_bytes": 549,
"license_type": "no_license",
"max_line_length": 93,
"num_lines": 41,
"path": "/doxygen/doxybook_output_vi/Files/vi__set__xpos__from__A_8s.md",
"repo_name": "orix-software/orix-software.github.io",
"src_encoding": "UTF-8",
"text": "---\ntitle: vi_set_xpos_from_A.s\n\n---\n\n# vi_set_xpos_from_A.s\n\n\n\n## Routine\n\n| | Name |\n| -------------- | -------------- |\n| | **[vi_set_xpos_from_A](Files/vi__set__xpos__from__A_8s.md#Routine-vi-set-xpos-from-a)** |\n\n\n## Routine documentation\n\n### Routine vi_set_xpos_from_A\n\n```ca65\nvi_set_xpos_from_A\n```\n\n\n\n\n## Source code\n\n```ca65\n.proc vi_set_xpos_from_A\n ldy #vi_struct_data::xpos_screen\n sta (vi_struct),y\n rts\n.endproc\n```\n\n\n-------------------------------\n\nUpdated on 2022-12-15 at 14:20:17 +0100\n"
},
{
"alpha_fraction": 0.5048543810844421,
"alphanum_fraction": 0.5395284295082092,
"avg_line_length": 14.020833015441895,
"blob_id": "ea6c2f9613b972641de44ea461a06ecb14290496",
"content_id": "e4caed916368e4375503233f7e69ceea690a7997",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Markdown",
"length_bytes": 721,
"license_type": "no_license",
"max_line_length": 117,
"num_lines": 48,
"path": "/docs/tools_docs/vi/Files/vi__editor__switch__on__cursor_8s.md",
"repo_name": "orix-software/orix-software.github.io",
"src_encoding": "UTF-8",
"text": "---\ntitle: /Routines/vi_editor_switch_on_cursor.s\n\n---\n\n# /Routines/vi_editor_switch_on_cursor.s\n\n\n\n## Routine\n\n| | Name |\n| -------------- | -------------- |\n| | **[vi_editor_switch_on_cursor](Files/vi__editor__switch__on__cursor_8s.md#Routine-vi-editor-switch-on-cursor)** |\n\n\n## Routine documentation\n\n### Routine vi_editor_switch_on_cursor\n\n```ca65\nvi_editor_switch_on_cursor\n```\n\n\n\n\n## Source code\n\n```ca65\n\n.proc vi_editor_switch_on_cursor\n ldy #vi_struct_data::xpos_screen\n lda (vi_struct),y\n tay\n\n lda (vi_ptr_screen),y\n ora #$80\n sta (vi_ptr_screen),y\n\n rts\n.endproc\n```\n\n\n-------------------------------\n\nUpdated on 2022-12-15 at 14:20:17 +0100\n"
},
{
"alpha_fraction": 0.577102780342102,
"alphanum_fraction": 0.5957943797111511,
"avg_line_length": 12.266666412353516,
"blob_id": "bdfd085df0a3a89069836d40896552de96ebef9e",
"content_id": "e5a49445604b602df76ca0f06ab63ec32862c1bf",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Markdown",
"length_bytes": 428,
"license_type": "no_license",
"max_line_length": 66,
"num_lines": 30,
"path": "/docs/developer_manual/orixsdk_macros/atoi.md",
"repo_name": "orix-software/orix-software.github.io",
"src_encoding": "UTF-8",
"text": "# atoi macro\r\n\r\n## Description\r\n\r\nConvert a string to a 16 bits number\r\n\r\n## Usage\r\n\r\natoi [ptr]\r\n\r\nptr may be: (ptr), address\r\n\r\n## Output\r\n\r\nAY: value\r\nX: number of digits (ie 10^X)\r\n\r\n## Example\r\n\r\n```ca65\r\n .include \"telestrat.inc\"\r\n .include \"../orix-sdk/macros/SDK_misc.mac\"\r\n\r\n atoi mystrnumber\r\n rts\r\nmystrnumber:\r\n .asciiz \"12\"\r\n```\r\n\r\nCall [XDECAY](../../../kernel/primitives/xdecay/) kernel function.\r\n"
},
{
"alpha_fraction": 0.6268292665481567,
"alphanum_fraction": 0.6560975313186646,
"avg_line_length": 11.600000381469727,
"blob_id": "0bc888b42b11aa167e82e8fd41f5a130fc51a943",
"content_id": "b2cf00fedb8beb90f9fcbfe57f534ff6457540c2",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Markdown",
"length_bytes": 414,
"license_type": "no_license",
"max_line_length": 67,
"num_lines": 30,
"path": "/docs/tutorials/basic11_programming_fr.md",
"repo_name": "orix-software/orix-software.github.io",
"src_encoding": "UTF-8",
"text": "# Programmer en basic sur la machine\r\n\r\n* Démarrer basic11 au shell :\r\n\r\n/#basic11\r\n\r\n* taper son programme :\r\n10 ?\"Salut\r\n\r\n* Sauvegarder :\r\n\r\nCSAVE\"PRG\r\n\r\n* quitter\r\n\r\ntaper quit ou appuyer sur reset\r\n\r\n* sur le shell voir la présence de son fichier dans le bon folder :\r\n\r\n/#cd home/basic11\r\n/#ls PRG.*\r\n\r\n=> il devrait être présent.\r\n\r\n* Revenir au basic11 et le charger\r\n\r\n/#basic11\r\n\r\nCLOAD\"PRG\r\nLIST\r\n\r\n"
},
{
"alpha_fraction": 0.555685818195343,
"alphanum_fraction": 0.5803048014640808,
"avg_line_length": 16.77083396911621,
"blob_id": "1c8d0de65442e2f6725db7b95a5f2da6eac43071",
"content_id": "334d2ae6591010a33cc5b37afadb2a00c441ad86",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Markdown",
"length_bytes": 853,
"license_type": "no_license",
"max_line_length": 120,
"num_lines": 48,
"path": "/doxygen/doxybook_output/Files/vi__editor__switch__on__cursor_8s.md",
"repo_name": "orix-software/orix-software.github.io",
"src_encoding": "UTF-8",
"text": "---\ntitle: /mnt/c/Users/plifp/OneDrive/oric/projets/orix-software/vi/src/functions/vi_editor_switch_on_cursor.s\n\n---\n\n# /mnt/c/Users/plifp/OneDrive/oric/projets/orix-software/vi/src/functions/vi_editor_switch_on_cursor.s\n\n\n\n## Functions\n\n| | Name |\n| -------------- | -------------- |\n| | **[vi_editor_switch_on_cursor](Files/vi__editor__switch__on__cursor_8s.md#function-vi-editor-switch-on-cursor)**() |\n\n\n## Functions Documentation\n\n### function vi_editor_switch_on_cursor\n\n```cpp\nvi_editor_switch_on_cursor()\n```\n\n\n\n\n## Source code\n\n```cpp\n\n.proc vi_editor_switch_on_cursor\n ldy #vi_struct_data::xpos_screen\n lda (vi_struct),y\n tay\n\n lda (vi_ptr_screen),y\n ora #$80\n sta (vi_ptr_screen),y\n\n rts\n.endproc\n```\n\n\n-------------------------------\n\nUpdated on 2022-12-15 at 11:48:27 +0100\n"
},
{
"alpha_fraction": 0.7472312450408936,
"alphanum_fraction": 0.7543973922729492,
"avg_line_length": 71.0952377319336,
"blob_id": "339c70b82b9bda64b118627f6df638978f0e83d4",
"content_id": "8361323836477656baa90b30aa0a81ab4d23f0cd",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Markdown",
"length_bytes": 1535,
"license_type": "no_license",
"max_line_length": 449,
"num_lines": 21,
"path": "/docs/developer_manual/binary_starts.md",
"repo_name": "orix-software/orix-software.github.io",
"src_encoding": "UTF-8",
"text": "# Binary start sequence\r\n\r\nAll commands are started with XEXEC kernel primitive. For example, when something is typed into shell, shell calls XEXEC kernel primitive (except for shell internal commands like echo, pwd ...).\r\n\r\nWhen a binary is executed, kernel fork a new process with its PID, PPID, CWD\r\n\r\n## Step 1 : check if binary is in a bank\r\n\r\nXEXEC reads all banks from 4 to 1 and tries to check if the program is in a ROM (that is why ROM needs to have a kind of header)\r\n\r\nIf the commands is in a bank, it switches to the right bank and launch command vector (and it forks). If the commands is not found, it swaps \"Twilighte banking register\" to provide others 65KB of bank, kernel checks banks again. When kernel reached the 32 banks of rom, it swap to ram set, and reads all theses 32 banks of RAM. If the command is not found, kernel send error code, and shell will display error message according to kernel error code.\r\n\r\nThat is why commands located in bank (eeprom and after RAM) are quicker to start. EEPROM banks are quicker to launch because kernel starts with eeprom set.\r\n\r\n## Step 2 : Check if the binary is on the device\r\n\r\nIf the binary is not in a bank, kernel will try to open \"/bin/BINARY_FILE\". If the binary open fails, it returns \"command not found\" or impossible to mount if device is not present.\r\n\r\n## Step 3 : binary on storage device is found\r\n\r\nKernel opens the file, reads the header, and loads the content of the binary on the right address. When it's OK, the binary is started (and a fork occurs)\r\n"
},
{
"alpha_fraction": 0.6566901206970215,
"alphanum_fraction": 0.7306337952613831,
"avg_line_length": 35.733333587646484,
"blob_id": "8a57c0b0fab9bcab4d79d3d930e117c6897a6f52",
"content_id": "fff8afd26f1b458f6247c52aa96e576dba97e0a1",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Markdown",
"length_bytes": 568,
"license_type": "no_license",
"max_line_length": 202,
"num_lines": 15,
"path": "/docs/user_manual/quickstart.md",
"repo_name": "orix-software/orix-software.github.io",
"src_encoding": "UTF-8",
"text": "# Quick Start\r\n\r\nDefault storage : the kernel is set with usb storage.\r\n\r\nThe manual is : https://orix-software.github.io/twilighte_manual.pdf\r\n\r\nFor board with 39SF040 eeprom, only use orixcfg equal or greater than 2023.2. Check version under Orix with : orixcfg -v. Beta version is here: http://repo.orix.oric.org/dists/alpha/tgz/6502/orixcfg.tgz\r\n\r\n## How the board must be plugged\r\n\r\n\r\n\r\n## How the board must be plugged when board does not with the oric (keyboard is lost or prompt does not appears)\r\n\r\n\r\n\r\n"
},
{
"alpha_fraction": 0.760869562625885,
"alphanum_fraction": 0.7763975262641907,
"avg_line_length": 62,
"blob_id": "4afba79710530f03904b282568ab14446d3e4605",
"content_id": "0ae27164ef17030db27e01ce6b975e8f8010e240",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Markdown",
"length_bytes": 322,
"license_type": "no_license",
"max_line_length": 233,
"num_lines": 5,
"path": "/docs/hardware/incompatibility_oric.md",
"repo_name": "orix-software/orix-software.github.io",
"src_encoding": "UTF-8",
"text": "# Board Incompatibility\r\n\r\nSome oric crashes sometimes with the board. If you have an amplibus (as Silicebit one with 74HCT541), you can plug it in the oric, and insert the expansion board from twilighte board in it, and the twilighte board in this last board.\r\n\r\nIt should solve all Oric compatibility with the board.\r\n\r\n"
},
{
"alpha_fraction": 0.5800163745880127,
"alphanum_fraction": 0.6037343144416809,
"avg_line_length": 20.957063674926758,
"blob_id": "f98e746caf071aaa69b46f0e555f0d2835fbed53",
"content_id": "7e544aa23ba63354e7d90011f91c7e0bc0198bb7",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Markdown",
"length_bytes": 15869,
"license_type": "no_license",
"max_line_length": 175,
"num_lines": 722,
"path": "/doxygen/doxybook_output/Files/vi_8s.md",
"repo_name": "orix-software/orix-software.github.io",
"src_encoding": "UTF-8",
"text": "---\ntitle: /mnt/c/Users/plifp/OneDrive/oric/projets/orix-software/vi/src/commands/vi.s\n\n---\n\n# /mnt/c/Users/plifp/OneDrive/oric/projets/orix-software/vi/src/commands/vi.s\n\n\n\n## Functions\n\n| | Name |\n| -------------- | -------------- |\n| | **[_vi](Files/vi_8s.md#function--vi)**() |\n\n## Defines\n\n| | Name |\n| -------------- | -------------- |\n| | **[CR](Files/vi_8s.md#define-cr)** |\n| | **[LF](Files/vi_8s.md#define-lf)** |\n| | **[IS_EOF](Files/vi_8s.md#define-is-eof)** |\n| | **[IS_BEGINNING_OF_THE_FILE](Files/vi_8s.md#define-is-beginning-of-the-file)** |\n| | **[IS_LAST_LINE_OF_SCREEN_TEXT](Files/vi_8s.md#define-is-last-line-of-screen-text)** |\n| | **[IS_BEGINNING_OF_THE_LINE](Files/vi_8s.md#define-is-beginning-of-the-line)** |\n| | **[VI_LAST_LINE_EDITOR](Files/vi_8s.md#define-vi-last-line-editor)** |\n| | **[VI_FILL_SCREEN_MODE_STOP_AT_THE_END_OF_LAST_LINE](Files/vi_8s.md#define-vi-fill-screen-mode-stop-at-the-end-of-last-line)** |\n| | **[VI_COMMANDLINE_MAX_CHAR](Files/vi_8s.md#define-vi-commandline-max-char)** |\n| | **[VI_MAX_LENGTH_FILE](Files/vi_8s.md#define-vi-max-length-file)** |\n| | **[VI_EDITOR_CHAR_LIMITS_EMPTY](Files/vi_8s.md#define-vi-editor-char-limits-empty)** |\n| | **[VI_COMMANDLINE_VIDEO_ADRESS](Files/vi_8s.md#define-vi-commandline-video-adress)** |\n| | **[VI_EDITION_LAST_VIDEO_ADRESS](Files/vi_8s.md#define-vi-edition-last-video-adress)** |\n| | **[VI_EDITION_VIDEO_ADRESS](Files/vi_8s.md#define-vi-edition-video-adress)** |\n| | **[VI_EDITOR_MAX_LENGTH_OF_A_LINE](Files/vi_8s.md#define-vi-editor-max-length-of-a-line)** |\n| | **[VI_EDITOR_MAX_COLUMN](Files/vi_8s.md#define-vi-editor-max-column)** |\n| | **[vi_struct](Files/vi_8s.md#define-vi-struct)** |\n| | **[vi_ptr1](Files/vi_8s.md#define-vi-ptr1)** |\n| | **[vi_argv](Files/vi_8s.md#define-vi-argv)** |\n| | **[vi_argc](Files/vi_8s.md#define-vi-argc)** |\n| | **[vi_arg1](Files/vi_8s.md#define-vi-arg1)** |\n| | **[vi_fp](Files/vi_8s.md#define-vi-fp)** |\n| | **[vi_ptr_file](Files/vi_8s.md#define-vi-ptr-file)** |\n| | **[vi_tmp2](Files/vi_8s.md#define-vi-tmp2)** |\n| | **[vi_ptr_screen](Files/vi_8s.md#define-vi-ptr-screen)** |\n| | **[vi_length_file](Files/vi_8s.md#define-vi-length-file)** |\n| | **[vi_length_file_compute](Files/vi_8s.md#define-vi-length-file-compute)** |\n| | **[vi_tmp1](Files/vi_8s.md#define-vi-tmp1)** |\n| | **[vi_fileopened](Files/vi_8s.md#define-vi-fileopened)** |\n| | **[vi_ptr_file_used](Files/vi_8s.md#define-vi-ptr-file-used)** |\n| | **[tmp0_16](Files/vi_8s.md#define-tmp0-16)** |\n| | **[vi_no_opened_file](Files/vi_8s.md#define-vi-no-opened-file)** |\n| | **[vi_pos_debug](Files/vi_8s.md#define-vi-pos-debug)** |\n| | **[vi_ptr2](Files/vi_8s.md#define-vi-ptr2)** |\n| | **[vi_savex](Files/vi_8s.md#define-vi-savex)** |\n| | **[vi_ptr3](Files/vi_8s.md#define-vi-ptr3)** |\n| | **[vi_tmp3](Files/vi_8s.md#define-vi-tmp3)** |\n\n\n## Functions Documentation\n\n### function _vi\n\n```cpp\n_vi()\n```\n\n\n\n\n## Macros Documentation\n\n### define CR\n\n```cpp\n#define CR $0D\n```\n\n\n### define LF\n\n```cpp\n#define LF $0A\n```\n\n\n### define IS_EOF\n\n```cpp\n#define IS_EOF $00\n```\n\n\n### define IS_BEGINNING_OF_THE_FILE\n\n```cpp\n#define IS_BEGINNING_OF_THE_FILE $00\n```\n\n\n### define IS_LAST_LINE_OF_SCREEN_TEXT\n\n```cpp\n#define IS_LAST_LINE_OF_SCREEN_TEXT $01\n```\n\n\n### define IS_BEGINNING_OF_THE_LINE\n\n```cpp\n#define IS_BEGINNING_OF_THE_LINE $00\n```\n\n\n### define VI_LAST_LINE_EDITOR\n\n```cpp\n#define VI_LAST_LINE_EDITOR 26\n```\n\n\nLast line used by the editor \n\n\n### define VI_FILL_SCREEN_MODE_STOP_AT_THE_END_OF_LAST_LINE\n\n```cpp\n#define VI_FILL_SCREEN_MODE_STOP_AT_THE_END_OF_LAST_LINE 0\n```\n\n\n### define VI_COMMANDLINE_MAX_CHAR\n\n```cpp\n#define VI_COMMANDLINE_MAX_CHAR 8\n```\n\n\n### define VI_MAX_LENGTH_FILE\n\n```cpp\n#define VI_MAX_LENGTH_FILE 2000\n```\n\n\n### define VI_EDITOR_CHAR_LIMITS_EMPTY\n\n```cpp\n#define VI_EDITOR_CHAR_LIMITS_EMPTY '~'\n```\n\n\n### define VI_COMMANDLINE_VIDEO_ADRESS\n\n```cpp\n#define VI_COMMANDLINE_VIDEO_ADRESS $bb80+40*27\n```\n\n\n### define VI_EDITION_LAST_VIDEO_ADRESS\n\n```cpp\n#define VI_EDITION_LAST_VIDEO_ADRESS $bb80+40*26\n```\n\n\n### define VI_EDITION_VIDEO_ADRESS\n\n```cpp\n#define VI_EDITION_VIDEO_ADRESS $bb80\n```\n\n\n### define VI_EDITOR_MAX_LENGTH_OF_A_LINE\n\n```cpp\n#define VI_EDITOR_MAX_LENGTH_OF_A_LINE 255\n```\n\n\n### define VI_EDITOR_MAX_COLUMN\n\n```cpp\n#define VI_EDITOR_MAX_COLUMN 39\n```\n\n\n### define vi_struct\n\n```cpp\n#define vi_struct userzp\n```\n\n\n2 bytes \n\n\n### define vi_ptr1\n\n```cpp\n#define vi_ptr1 userzp+2\n```\n\n\n2 bytes \n\n\n### define vi_argv\n\n```cpp\n#define vi_argv userzp+4\n```\n\n\n2 bytes \n\n\n### define vi_argc\n\n```cpp\n#define vi_argc userzp+6\n```\n\n\n1 bytes \n\n\n### define vi_arg1\n\n```cpp\n#define vi_arg1 userzp+7\n```\n\n\n2 bytes \n\n\n### define vi_fp\n\n```cpp\n#define vi_fp userzp+9\n```\n\n\n2 bytes \n\n\n### define vi_ptr_file\n\n```cpp\n#define vi_ptr_file userzp+11\n```\n\n\n2 bytes \n\n\n### define vi_tmp2\n\n```cpp\n#define vi_tmp2 userzp+13\n```\n\n\n### define vi_ptr_screen\n\n```cpp\n#define vi_ptr_screen userzp+14\n```\n\n\n2 bytes \n\n\n### define vi_length_file\n\n```cpp\n#define vi_length_file userzp+16\n```\n\n\n2 bytes \n\n\n### define vi_length_file_compute\n\n```cpp\n#define vi_length_file_compute userzp+18\n```\n\n\n2 bytes \n\n\n### define vi_tmp1\n\n```cpp\n#define vi_tmp1 userzp+20\n```\n\n\n### define vi_fileopened\n\n```cpp\n#define vi_fileopened userzp+22\n```\n\n\n### define vi_ptr_file_used\n\n```cpp\n#define vi_ptr_file_used userzp+24\n```\n\n\n2 bytes \n\n\n### define tmp0_16\n\n```cpp\n#define tmp0_16 userzp+26\n```\n\n\n### define vi_no_opened_file\n\n```cpp\n#define vi_no_opened_file userzp+28\n```\n\n\n### define vi_pos_debug\n\n```cpp\n#define vi_pos_debug userzp+30\n```\n\n\n### define vi_ptr2\n\n```cpp\n#define vi_ptr2 userzp+32\n```\n\n\n2 bytes \n\n\n### define vi_savex\n\n```cpp\n#define vi_savex userzp+34\n```\n\n\n2 bytes \n\n\n### define vi_ptr3\n\n```cpp\n#define vi_ptr3 userzp+36\n```\n\n\n2 bytes \n\n\n### define vi_tmp3\n\n```cpp\n#define vi_tmp3 userzp+38\n```\n\n\n2 bytes \n\n\n## Source code\n\n```cpp\n; Limits max line : 256 bytes\n; Max file : VI_MAX_LENGTH_FILE (2000 bytes)\n; max edit char per line : 39\n\n; Max 2KB pour un fichier (arbitraire)\n; Pas de possibilité d'éditer une ligne de plus de 39 chars (donc pas de gestion de retour à la ligne quand une ligne fait plus de 40 chars\n; Quand on remonte ou quand on descend le curseur, il revient toujours à la position X=0 même si on était en milieu de ligne\n; Pas de gestion du mode écrasement\n; Pas de possibilité d'avoir une touche pour aller en fin de ligne\n; gère le mode windows pour les retours à la ligne, et ne marchera pas en retour chariot unix\n\n\n.macro vi_dec16_zp addr\n\n lda addr ; 98\n bne *+2 ; go to label\n dec addr+1\n;label:\n dec addr\n.endmacro\n\nCR = $0D\nLF = $0A\n\nIS_EOF = $00\nIS_BEGINNING_OF_THE_FILE = $00\nIS_LAST_LINE_OF_SCREEN_TEXT = $01\nIS_BEGINNING_OF_THE_LINE = $00\n\n\nVI_LAST_LINE_EDITOR = 26 ; Last line used by the editor\nVI_FILL_SCREEN_MODE_STOP_AT_THE_END_OF_LAST_LINE = 0\nVI_COMMANDLINE_MAX_CHAR = 8\nVI_MAX_LENGTH_FILE = 2000\nVI_EDITOR_CHAR_LIMITS_EMPTY = '~'\nVI_COMMANDLINE_VIDEO_ADRESS := $bb80+40*27\nVI_EDITION_LAST_VIDEO_ADRESS := $bb80+40*26\nVI_EDITION_VIDEO_ADRESS := $bb80\nVI_EDITOR_MAX_LENGTH_OF_A_LINE = 255\nVI_EDITOR_MAX_COLUMN = 39\n\n\n\n;* labels prefixed with _ are populated from C*/\n vi_struct := userzp ; 2 bytes\n vi_ptr1 := userzp+2 ; 2 bytes\n vi_argv := userzp+4 ; 2 bytes\n vi_argc := userzp+6 ; 1 bytes\n vi_arg1 := userzp+7 ; 2 bytes\n vi_fp := userzp+9 ; 2 bytes\n vi_ptr_file := userzp+11 ; 2 bytes\n vi_tmp2 := userzp+13\n vi_ptr_screen := userzp+14 ; 2 bytes\n vi_length_file := userzp+16 ; 2 bytes\n vi_length_file_compute := userzp+18 ; 2 bytes\n vi_tmp1 := userzp+20\n vi_fileopened := userzp+22\n vi_ptr_file_used := userzp+24 ; 2 bytes\n tmp0_16 := userzp+26\n vi_no_opened_file := userzp+28\n vi_pos_debug := userzp+30\n vi_ptr2 := userzp+32 ; 2 bytes\n vi_savex := userzp+34 ; 2 bytes\n vi_ptr3 := userzp+36 ; 2 bytes\n vi_tmp3 := userzp+38 ; 2 bytes\n\n\n\n\n.include \"data/vi/strings.s\"\n.include \"functions/vi_fill_screen_with_empty_line.s\"\n\n.include \"functions/vi_struct.s\"\n.include \"functions/vi_displays_info.s\"\n\n.include \"functions/subfunc/vi/vi_init_vi_struct.s\"\n\n.include \"functions/subfunc/vi/vi_ptr_last_char_plus_plus.s\"\n.include \"functions/subfunc/vi/vi_ptr_last_char_sub_sub.s\"\n.include \"functions/subfunc/vi/vi_set_ptr_last_char.s\"\n.include \"functions/subfunc/vi/vi_ptr_last_char_add.s\"\n.include \"functions/subfunc/vi/vi_vi_ptr_file_used_plus_plus.s\"\n\n.include \"functions/subfunc/vi/vi_xpos_screen_plus_plus.s\"\n.include \"functions/subfunc/vi/vi_xpos_screen_sub_sub.s\"\n\n.include \"functions/subfunc/vi/vi_ypos_screen_sub_sub.s\"\n.include \"functions/subfunc/vi/vi_ypos_screen_plus_plus.s\"\n\n.include \"functions/subfunc/vi/vi_ptr_file_used_plus_plus.s\"\n.include \"functions/subfunc/vi/vi_length_file_sub_sub.s\"\n.include \"functions/subfunc/vi/vi_display_char.s\"\n.include \"functions/subfunc/vi/vi_check_beginning_of_file.s\"\n.include \"functions/subfunc/vi/vi_fill_last_line.s\"\n.include \"functions/subfunc/vi/vi_copy_arg1_to_name_file_open.s\"\n.include \"functions/subfunc/vi/vi_length_file_plus_plus.s\"\n.include \"functions/subfunc/vi/vi_compute_video_adress.s\"\n.include \"functions/subfunc/vi/vi_ptr_file_used_sub_sub.s\"\n.include \"functions/subfunc/vi/vi_display_file_opened.s\"\n.include \"functions/subfunc/vi/vi_check_0A.s\"\n.include \"functions/subfunc/vi/vi_set_xpos_0.s\"\n.include \"functions/subfunc/vi/vi_ptr_file_used_plus_plus_and_check_eof.s\"\n.include \"functions/subfunc/vi/vi_search_next_line.s\"\n.include \"functions/subfunc/vi/vi_shift_file_from_memory_one_char.s\"\n.include \"functions/subfunc/vi/vi_check_inserted_char_overflow_the_max_column.s\"\n.include \"functions/subfunc/vi/vi_scroll_from_left_to_right_full_line.s\"\n.include \"functions/subfunc/vi/vi_shift_line_left_to_right_editor.s\"\n.include \"functions/subfunc/vi/vi_search_previous_line_beginning.s\"\n.include \"functions/subfunc/vi/vi_set_xpos_from_A.s\"\n.include \"functions/subfunc/vi/vi_scroll_to_left.s\"\n.include \"functions/subfunc/vi/vi_check_if_previous_line_was_truncated.s\"\n.include \"functions/subfunc/vi/vi_search_previous_cr.s\"\n.include \"functions/subfunc/vi/vi_add_char_to_text.s\"\n.include \"functions/subfunc/vi/vi_strlen_current_line.s\"\n.include \"functions/subfunc/vi/vi_compute_last_text_line.s\"\n\n.include \"functions/subfunc/vi/vi_clear_command_line.s\"\n\n.include \"functions/subfunc/vi/vi_key_down.s\"\n.include \"functions/subfunc/vi/vi_key_enter.s\"\n.include \"functions/subfunc/vi/vi_key_del.s\"\n.include \"functions/subfunc/vi/vi_key_up.s\"\n.include \"functions/subfunc/vi/vi_key_right.s\"\n.include \"functions/subfunc/vi/vi_key_left.s\"\n.include \"functions/subfunc/vi/vi_check_eof.s\"\n\n.include \"functions/vi_put_char.s\"\n.include \"functions/vi_command_edition.s\"\n.include \"functions/vi_editor_switch_on_cursor.s\"\n.include \"functions/vi_editor_switch_off_cursor.s\"\n.include \"functions/vi_edition_keyboard.s\"\n.include \"functions/vi_switch_to_edition_mode.s\"\n\n.include \"functions/tables.s\"\n.include \"functions/_clrscr_vi.s\"\n\n\n.proc _vi\n\n\n XMAINARGS = $2C\n XGETARGV = $2E\n argv := userzp ; 2 bytes\n argc := userzp+2 ; 1 byte\n\n lda #$00\n sta vi_no_opened_file\n\n\n lda #<$bb80\n sta vi_ptr_screen\n\n lda #>$bb80\n sta vi_ptr_screen+1\n\n\n lda #$00\n sta vi_fileopened\n\n\n malloc .sizeof(vi_struct_data)\n\n cmp #NULL\n bne @not_oom2\n cpy #NULL\n bne @not_oom2\n print str_OOM\n ; oom\n rts\n@not_oom2:\n\n sta vi_struct\n sty vi_struct+1\n\n jsr vi_init_vi_struct\n\n initmainargs vi_argv, vi_argc, 0\n\n lda vi_argc ; Do we have a file on the command line\n cmp #$01\n beq not_opened_file\n\n getmainarg #1, (vi_argv) ,vi_arg1\n\n ; Checking if vi_arg1 is not empty\n\n ; when we type \"vi [space]\" on command line, initmainargs returns argv=2 but the arg is empty\n ; This is a fix to avoid this\n ldy #$00\n@check_arg:\n lda (vi_arg1),y\n beq not_opened_file\n\n@check_filename:\n\n fopen (vi_arg1), O_RDONLY,,vi_fp\n\n cpx #$FF\n bne opened_file\n cmp #$FF\n bne opened_file\n beq not_opened_file\n\n\n\nopened_file:\n lda #$01\n sta vi_fileopened\n\n ; copy into name_file_open of the struct\n jsr vi_copy_arg1_to_name_file_open\n\nnot_opened_file:\n\n cursor OFF\n jsr _clrscr_vi\n\n malloc #VI_MAX_LENGTH_FILE,vi_ptr_file ; $376B\n\n lda vi_ptr_file ; $3769\n bne @ptr_file_continue\n lda vi_ptr_file+1\n bne @ptr_file_continue\n print str_OOM\n rts\n\n@ptr_file_continue:\n ; Set last char of the file ptr\n\n lda vi_ptr_file\n ldx vi_ptr_file+1\n jsr vi_set_ptr_last_char\n\n lda vi_ptr_file ; Contains the ptr $376B\n sta vi_ptr_file_used\n\n lda vi_ptr_file+1\n sta vi_ptr_file_used+1\n\n lda vi_fileopened\n beq @skip_loading\n\n ; Now load the file\n fread (vi_ptr_file_used), #VI_MAX_LENGTH_FILE, 1, vi_fp ; myptr is from a malloc for example\n\n sta vi_length_file\n sta vi_length_file_compute\n stx vi_length_file+1\n stx vi_length_file_compute+1\n fclose(vi_fp)\n\n\n lda vi_ptr_file\n sta vi_ptr1\n sta vi_ptr_file_used\n\n\n lda vi_ptr_file+1\n sta vi_ptr1+1\n sta vi_ptr_file_used+1\n\n ; now set vi_ptr_last_char with the length of the file\n\n lda vi_length_file\n ldx vi_length_file+1\n jsr vi_ptr_last_char_add\n\n ; Check eof\n ; Dans le cas où on a chargé avant un fichier, comme on ne s'arrete pas à l'eof du fichier courant, cela va continuer a s'afficher alors même qu'on est à la fin du fichier\n ; Cela va afficher l'ancien fichier chargé. On compare donc pour s'arreter.\n\n jsr vi_display_file_opened\n\n@skip_fill_last_line:\n\n ldx #$01\n ldy #vi_struct_data::ypos_screen\n lda (vi_struct),y\n beq @no_compute ; First line ? Then X=1 in order to display ~ on the second line\n cmp #VI_LAST_LINE_EDITOR\n beq @no_empty_line_begin\n\n tax\n inx\n\n@no_compute:\n ldy #$01\n jmp @display_empty_line_begin\n\n@skip_loading:\n ldy #$01 ; For a new_file\n ldx #$01\n@display_empty_line_begin:\n jsr vi_fill_screen_with_empty_line\n\n@no_empty_line_begin:\n ; Set cursor position to 0,0\n ldy #vi_struct_data::ypos_screen\n lda #$00\n sta (vi_struct),y\n\n ldy #vi_struct_data::xpos_screen\n sta (vi_struct),y\n\n lda vi_ptr_file ; Contains the ptr\n sta vi_ptr_file_used\n\n lda vi_ptr_file+1\n sta vi_ptr_file_used+1\n\n\n@loop_until_esc_is_pressed:\n\n jsr vi_edition_keyboard\n\n cmp #$01\n\n beq @final_exit\n jmp @loop_until_esc_is_pressed\n\n@final_exit:\n rts\n\n; **********************************\n\n\n\n\nstr_OOM:\n .asciiz \"OOM\" ; FIXME import from general lib\nstr_not_found:\n .asciiz \"File not found\"\n\n\n.endproc\n```\n\n\n-------------------------------\n\nUpdated on 2022-12-15 at 11:48:27 +0100\n"
},
{
"alpha_fraction": 0.4756801426410675,
"alphanum_fraction": 0.5193734765052795,
"avg_line_length": 14.753246307373047,
"blob_id": "da519ec6c99fa2b8fb63e9edf58758711e33c36c",
"content_id": "394c08a9c89447a008ca7b0e38ea3cc809876d5e",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Markdown",
"length_bytes": 1213,
"license_type": "no_license",
"max_line_length": 143,
"num_lines": 77,
"path": "/doxygen/doxybook_output_vi/Files/vi__shift__file__from__memory__one__char_8s.md",
"repo_name": "orix-software/orix-software.github.io",
"src_encoding": "UTF-8",
"text": "---\ntitle: vi_shift_file_from_memory_one_char.s\n\n---\n\n# vi_shift_file_from_memory_one_char.s\n\n\n\n## Routine\n\n| | Name |\n| -------------- | -------------- |\n| | **[vi_shift_file_from_memory_one_char](Files/vi__shift__file__from__memory__one__char_8s.md#Routine-vi-shift-file-from-memory-one-char)** |\n\n\n## Routine documentation\n\n### Routine vi_shift_file_from_memory_one_char\n\n```ca65\nvi_shift_file_from_memory_one_char\n```\n\n\n\n\n## Source code\n\n```ca65\n.proc vi_shift_file_from_memory_one_char\n\n ldy #vi_struct_data::ptr_last_char_file\n lda (vi_struct),y\n sta vi_ptr1 ; A4 A0 39\n iny\n lda (vi_struct),y\n sta vi_ptr1+1 ; 3A 3A 08\n\n@copy_char_routine:\n ldy #$00\n\n lda (vi_ptr1),y\n iny\n sta (vi_ptr1),y\n\n lda vi_ptr1\n bne @S1\n dec vi_ptr1+1\n@S1:\n dec vi_ptr1\n\n\n@L1:\n lda vi_ptr1\n cmp vi_ptr_file_used\n bne @copy_char_routine\n\n lda vi_ptr1+1\n cmp vi_ptr_file_used+1\n bne @copy_char_routine\n\n ldy #$00\n\n lda (vi_ptr1),y\n iny\n sta (vi_ptr1),y\n\n rts\n\n.endproc\n```\n\n\n-------------------------------\n\nUpdated on 2022-12-15 at 14:20:17 +0100\n"
},
{
"alpha_fraction": 0.7282184362411499,
"alphanum_fraction": 0.7438231706619263,
"avg_line_length": 26.464284896850586,
"blob_id": "8fdefda6798d4c227628e871f2b0e38c1e8e5ec5",
"content_id": "60e503b9a1736ff7cae966f7b9e9d7aeddf7bc13",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Markdown",
"length_bytes": 1538,
"license_type": "no_license",
"max_line_length": 32,
"num_lines": 56,
"path": "/docs/commands/commands_for_nav.md",
"repo_name": "orix-software/orix-software.github.io",
"src_encoding": "UTF-8",
"text": "- asm2k2: commands/asm2k2.md\n- bank: commands/bank.md\n- basic10: commands/basic10.md\n- basic11: commands/basic11.md\n- barboric: commands/barboric.md\n- blakes7: commands/blakes7.md\n- bootfd: commands/bootfd.md\n- born1983: commands/born1983.md\n- cat: commands/cat.md\n- cd: commands/cd.md\n- cksum: commands/cksum.md\n- clear: commands/clear.md\n- cp: commands/cp.md\n- df: commands/df.md\n- otimer: commands/otimer.md\n- dsk-util: commands/dsk-util.md\n- echo: commands/echo.md\n- env: commands/env.md\n- forth: commands/forth.md\n- ftdos: commands/ftdos.md\n- grep: commands/grep.md\n- help: commands/help.md\n- hexdump: commands/hexdump.md\n- ioports: commands/ioports.md\n- list: commands/list.md\n- ls: commands/ls.md\n- lscpu: commands/lscpu.md\n- lsmem: commands/lsmem.md\n- loader: commands/loader.md\n- man: commands/man.md\n- mkdir: commands/mkdir.md\n- mount: commands/mount.md\n- more: commands/more.md\n- orixcfg: commands/orixcfg.md\n- pwd: commands/pwd.md\n- ps: commands/ps.md\n- quintes: commands/quintes.md\n- raw2dsk: commands/raw2dsk.md\n- readdsk: commands/readdsk.md\n- reboot: commands/reboot.md\n- rm: commands/rm.md\n- setfont: commands/setfont.md\n- loader: commands/loader.md\n- twilconf: commands/twilconf.md\n- strerr: commands/strerr.md\n- sh: commands/sh.md\n- submit: commands/submit.md\n- touch: commands/touch.md\n- twil: commands/twil.md\n- twiload: commands/twiload.md\n- uname: commands/uname.md\n- untar: commands/untar.md\n- vidplay: commands/vidplay.md\n- viewscr: commands/viewscr.md\n- viewhrs: commands/viewhrs.md\n- zerofx: commands/zerofx.md\n"
},
{
"alpha_fraction": 0.5986841917037964,
"alphanum_fraction": 0.6074561476707458,
"avg_line_length": 17,
"blob_id": "e813c6610182b572050b0022e957fa7bb3be1544",
"content_id": "c49b91bbe3c962ae22983239fed8ec00cb1a96fd",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Markdown",
"length_bytes": 456,
"license_type": "no_license",
"max_line_length": 98,
"num_lines": 24,
"path": "/docs/kernel/primitives/xrm.md",
"repo_name": "orix-software/orix-software.github.io",
"src_encoding": "UTF-8",
"text": "# XRM\r\n\r\n## Description\r\n\r\nRemove file in arg\r\n\r\n* Input : AX the ptr\r\n* Returns ENOENT in A if the file does not exist\r\n* Returns $00 in A if the rm is done\r\n\r\n## Example\r\n\r\n```ca65\r\n .include \"telestrat.inc\"\r\n\r\n lda #<myfile_to_remove\r\n ldx #>myfile_to_remove\r\n BRK_TELEMON XRM\r\n rts\r\nmyfile_to_remove:\r\n .asciiz \"toto\"\r\n```\r\n\r\n!!! tip \"See [unlink](../../developer_manual/orixsdk_macros/unlink) macro from orix-sdk to use it\"\r\n"
},
{
"alpha_fraction": 0.5688405632972717,
"alphanum_fraction": 0.5760869383811951,
"avg_line_length": 10.545454978942871,
"blob_id": "387ef26dc372d65a926ef4bd23447a9b692134b9",
"content_id": "c9fba8e9c6b0dfb8343fe4233aa8d5213b676bb6",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Markdown",
"length_bytes": 276,
"license_type": "no_license",
"max_line_length": 66,
"num_lines": 22,
"path": "/docs/developer_manual/orixsdk_macros/shoot.md",
"repo_name": "orix-software/orix-software.github.io",
"src_encoding": "UTF-8",
"text": "# Shoot\r\n\r\n## Description\r\n\r\nProduce a shoot sound\r\n\r\n## Usage\r\n\r\nshoot\r\n\r\n## Example\r\n\r\n```ca65\r\n .include \"telestrat.inc\"\r\n .include \"../orix-sdk/macros/SDK_sound.mac\"\r\n\r\n shoot\r\n rts\r\n\r\n```\r\n\r\nCall [XSHOOT](../../../kernel/primitives/xshoot/) kernel function.\r\n"
},
{
"alpha_fraction": 0.5802631378173828,
"alphanum_fraction": 0.6078947186470032,
"avg_line_length": 13.833333015441895,
"blob_id": "a36c8171b58e221298aa6d5fa3dd6c99c6c1a2c6",
"content_id": "84b687d577a14c9cadd637581ddd954181e6a50c",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Markdown",
"length_bytes": 760,
"license_type": "no_license",
"max_line_length": 74,
"num_lines": 48,
"path": "/docs/developer_manual/orixsdk_macros/print_int.md",
"repo_name": "orix-software/orix-software.github.io",
"src_encoding": "UTF-8",
"text": "# Print_int\r\n\r\n## Description\r\n\r\nDisplays integer\r\n\r\n## Usage\r\n\r\nprint_int [ptr], [len], [char]\r\n\r\nnote:\r\nptr may be: (ptr), address, <empty>\r\nlen: value\r\nchar: value (DEFAFF is not restored)\r\n\r\n## Example 1\r\n\r\nThe value is from mynumber offset\r\n\r\n```ca65\r\n .include \"telestrat.inc\"\r\n .include \"../orix-sdk/macros/SDK_print.mac\"\r\n\r\n print_int mynumber, 2, 2\r\n rts\r\n\r\nmynumber:\r\n .byt 12\r\n```\r\n\r\n## Example 2\r\n\r\nThe value is from registers\r\n\r\n```ca65\r\n .include \"telestrat.inc\"\r\n .include \"../orix-sdk/macros/SDK_print.mac\"\r\n\r\n lda #12 ; 12\r\n ldy #00 ; 0 because the number is 12 (from A)\r\n print_int ,2, 2 ; an arg is skipped because the number is from register\r\n rts\r\n\r\n\r\n```\r\n\r\n\r\nCall [XDECIM](../../../kernel/primitives/xdecim/) xdecim\r\n"
},
{
"alpha_fraction": 0.6901408433914185,
"alphanum_fraction": 0.707355260848999,
"avg_line_length": 19.233333587646484,
"blob_id": "b7ddcac252636bd7c4e29c0c9a23d2a63cf3eaa5",
"content_id": "600fbccbf04234badf91f05bcd6d295498d7835f",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Markdown",
"length_bytes": 639,
"license_type": "no_license",
"max_line_length": 97,
"num_lines": 30,
"path": "/docs/hardware/hardware_issues.md",
"repo_name": "orix-software/orix-software.github.io",
"src_encoding": "UTF-8",
"text": "# Hardwares issues\r\n\r\n## Oric does not work with the board\r\n\r\nSee Incompatibilty section\r\n\r\n## The screen is black at the first boot\r\n\r\nOn some Oric, the board does not start at boot time. If you press reset, it should start normally\r\n\r\n## USB mass storage and sdcard compatibility\r\n\r\nSome tests had been done on some usb mass storage key and sdcard, here is some informations\r\n\r\n### Working sdcard\r\n\r\n* samsung evo 32GB and 16GB\r\n\r\n* samsung evo plus 32GB and 16GB\r\n\r\n* and many others\r\n\r\n### Working usb mass storage\r\n\r\n* Sandisk 32GB\r\n* pi zero in gadget mode\r\n\r\n### Non working usb mass storage key\r\n\r\n* verbatim 'store and go' 8GB\r\n\r\n"
},
{
"alpha_fraction": 0.7295597195625305,
"alphanum_fraction": 0.7704402804374695,
"avg_line_length": 34.22222137451172,
"blob_id": "867f08d342cf631b3101e5ab02754ee3ae6ccf0d",
"content_id": "98e25b9b668057d2b2ef1cf911786895c6f9d651",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Markdown",
"length_bytes": 318,
"license_type": "no_license",
"max_line_length": 129,
"num_lines": 9,
"path": "/docs/hardware/memory.md",
"repo_name": "orix-software/orix-software.github.io",
"src_encoding": "UTF-8",
"text": "# Memory\n\nTwilighte boards handles 512KB of RAM (SRAM) and 512KB of eeprom.\n\nTheses RAM and ROM are in banking mode : It replaces the top memory ($c000 to $FFFF) as Overlay ram works with microdisc systems.\n\nThere is 32 banks of RAMs and 32 banks of ROMs\n\nbank command line command can displays the availables banks.\n\n"
},
{
"alpha_fraction": 0.6160337328910828,
"alphanum_fraction": 0.6244725584983826,
"avg_line_length": 12.8125,
"blob_id": "62b74f7e483973be8b82d053583d9bd4e82dd602",
"content_id": "d9e695ed48d8f299372b5d60a5114a48da4f3181",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Markdown",
"length_bytes": 474,
"license_type": "no_license",
"max_line_length": 59,
"num_lines": 32,
"path": "/docs/developer_manual/orixsdk_macros/print.md",
"repo_name": "orix-software/orix-software.github.io",
"src_encoding": "UTF-8",
"text": "# Print macro\r\n\r\n## Description\r\n\r\nprint string in text mode\r\n\r\n## usage\r\n\r\nprint #byte [,SAVE]\r\nprint (pointer) [,SAVE]\r\nprint address [,SAVE]\r\n\r\nOption:\r\n\r\n- NOSAVE : does not preserve A,X,Y registers\r\n\r\n## Example\r\n\r\n```ca65\r\n\r\n .include \"../orix-sdk/macros/SDK_print.mac\"\r\n .include \"telestrat.inc\"\r\n\r\n print helloworld_str\r\n crlf ; Return line\r\n rts\r\n\r\nhelloworld_str:\r\n .asciiz \"Hello world!\"\r\n```\r\n\r\nCall [XWSTR0](../../../kernel/primitives/xwstr0/) function.\r\n"
},
{
"alpha_fraction": 0.6127321124076843,
"alphanum_fraction": 0.6180371642112732,
"avg_line_length": 14.391304016113281,
"blob_id": "444b9e93ba69d0fbad2d50f25cfee144a30285c5",
"content_id": "472c1c7b5a0ae4e83502d4ac124e65c471f19bf3",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Markdown",
"length_bytes": 377,
"license_type": "no_license",
"max_line_length": 103,
"num_lines": 23,
"path": "/docs/developer_manual/orixsdk_macros/setscreen.md",
"repo_name": "orix-software/orix-software.github.io",
"src_encoding": "UTF-8",
"text": "# Setscreen\r\n\r\n## Description\r\n\r\nSwitch to text or hires\r\n\r\n## Usage\r\n\r\nsetscreen [text|hires]\r\n\r\nCall XTEXT/XHIRES functions\r\n\r\n## Example\r\n\r\n```ca65\r\n .include \"telestrat.inc\"\r\n .include \"../orix-sdk/macros/SDK_display.mac\"\r\n\r\n setscreen hires\r\n rts\r\n```\r\n\r\nCall [XTEXT](../../../kernel/primitives/xtext/) routine or [XHIRES](../../../kernel/primitives/xhires/)\r\n"
},
{
"alpha_fraction": 0.49407583475112915,
"alphanum_fraction": 0.5296208262443542,
"avg_line_length": 15.230769157409668,
"blob_id": "3073084c5444399b09a75e2ecb888368597098fb",
"content_id": "64856898f63db1b855e4918a25bdb5d91a692df9",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Markdown",
"length_bytes": 844,
"license_type": "no_license",
"max_line_length": 105,
"num_lines": 52,
"path": "/docs/tools_docs/vi/Files/vi__xpos__screen__sub__sub_8s.md",
"repo_name": "orix-software/orix-software.github.io",
"src_encoding": "UTF-8",
"text": "---\ntitle: vi_xpos_screen_sub_sub.s\n\n---\n\n# vi_xpos_screen_sub_sub.s\n\n\n\n## Routine\n\n| | Name |\n| -------------- | -------------- |\n| | **[vi_xpos_screen_sub_sub](Files/vi__xpos__screen__sub__sub_8s.md#Routine-vi-xpos-screen-sub-sub)** |\n\n\n## Routine documentation\n\n### Routine vi_xpos_screen_sub_sub\n\n```ca65\nvi_xpos_screen_sub_sub\n```\n\n\n\n\n## Source code\n\n```ca65\n.proc vi_xpos_screen_sub_sub\n ;; vi_xpos_screen_sub_sub\n ;; Return 00 if we are on the first line\n ;; xpos_screen=ypos_screen+1\n ldy #vi_struct_data::xpos_screen\n lda (vi_struct),y\n beq @no_substract\n sec\n sbc #$01\n sta (vi_struct),y\n lda #$01\n rts\n@no_substract:\n lda #IS_BEGINNING_OF_THE_LINE\n rts\n.endproc\n```\n\n\n-------------------------------\n\nUpdated on 2022-12-15 at 14:20:17 +0100\n"
},
{
"alpha_fraction": 0.5645756721496582,
"alphanum_fraction": 0.5719557404518127,
"avg_line_length": 10.904762268066406,
"blob_id": "5395abb198a6605820aea81da1875128ff04895e",
"content_id": "87a8c1b9f17e990d0d89f32f02a3954b3a82b21c",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Markdown",
"length_bytes": 271,
"license_type": "no_license",
"max_line_length": 64,
"num_lines": 21,
"path": "/docs/developer_manual/orixsdk_macros/crlf.md",
"repo_name": "orix-software/orix-software.github.io",
"src_encoding": "UTF-8",
"text": "# crlf\r\n\r\n## Description\r\n\r\nReturn to the next line\r\n\r\n## Usage\r\n\r\ncrlf\r\n\r\n## Example\r\n\r\n```ca65\r\n .include \"telestrat.inc\"\r\n .include \"../orix-sdk/macros/SDK_conio.mac\"\r\n\r\n crlf\r\n rts\r\n```\r\n\r\nCall [XCRLF](../../../kernel/primitives/XCRLF/) kernel function.\r\n"
},
{
"alpha_fraction": 0.2975206673145294,
"alphanum_fraction": 0.44628098607063293,
"avg_line_length": 6.5625,
"blob_id": "05fc1639f2ba686815ddf570a5e3254bcce00b11",
"content_id": "b3ef19a051bf0a875384ef9057802dd6ac77a19a",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Markdown",
"length_bytes": 121,
"license_type": "no_license",
"max_line_length": 39,
"num_lines": 16,
"path": "/doxygen/doxybook_output/index_namespaces.md",
"repo_name": "orix-software/orix-software.github.io",
"src_encoding": "UTF-8",
"text": "---\ntitle: Namespaces\n\n---\n\n# Namespaces\n\n\n\n\n\n\n\n-------------------------------\n\nUpdated on 2022-12-15 at 11:48:27 +0100\n"
},
{
"alpha_fraction": 0.7361963391304016,
"alphanum_fraction": 0.7423312664031982,
"avg_line_length": 28.10714340209961,
"blob_id": "d88680fdc1fb6a5226358dabf21710dadb5bb2d2",
"content_id": "beefd10cc9735c7355f94677f0472bf87d155481",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Markdown",
"length_bytes": 820,
"license_type": "no_license",
"max_line_length": 167,
"num_lines": 28,
"path": "/docs/commands/bootfd.md",
"repo_name": "orix-software/orix-software.github.io",
"src_encoding": "UTF-8",
"text": "# Command: bootfd\n\n## Starts microdisc bootsector\n\nbootfd is a tool to boot the boot sector when a drive is connected.\nInsert a disk and type :\n\n/#bootfd\n\nThe binary is located in \"/bin\" folder. It will load microdisc rom and\nwill start it. If it displays \"microdis.rom not found\",\nthen place \"microdis.rom\" file in the right folder.\n\nIf you have firmware 1, you will be able to load « blake's 7 ». If you\nhave firmware 2, all sedoric .dsk should start.\n\nFor instance, only Space99 does not work, it stops after intro.\n\n## SYNOPSYS\n+ bootfd\n\n## EXAMPLES\n+ bootfd\n\nBoots only microdisc boot sector with the help of microdisc.rom. In this version, we don't verify if a dsk is inserted or not. If there is no disk, the oric will crash\n\n## SOURCE\nSrc code : https://github.com/orix-software/bootfd.git (Assembly and C)\n"
},
{
"alpha_fraction": 0.6530612111091614,
"alphanum_fraction": 0.6530612111091614,
"avg_line_length": 7.800000190734863,
"blob_id": "d8cd151bfb57a56e2c6a1bdc1bfc92c7627a5e31",
"content_id": "80707633b4dc3d53c3106315f55ecb99040178da",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Markdown",
"length_bytes": 49,
"license_type": "no_license",
"max_line_length": 17,
"num_lines": 5,
"path": "/docs/kernel/primitives/xcsscr.md",
"repo_name": "orix-software/orix-software.github.io",
"src_encoding": "UTF-8",
"text": "# XCSSCR\r\n\r\n## Description\r\n\r\nSwitch off cursor\r\n"
},
{
"alpha_fraction": 0.714318037033081,
"alphanum_fraction": 0.7224674820899963,
"avg_line_length": 130.8656768798828,
"blob_id": "9bd07a9abc722a4374e9845da8872c9e82d0fcbd",
"content_id": "a34fa6ab8680633450f809c1b03bf5fd2db9b3c2",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Markdown",
"length_bytes": 8835,
"license_type": "no_license",
"max_line_length": 263,
"num_lines": 67,
"path": "/doxygen/doxybook_output/Files/dir_a5544c2bf0b70f8d417c4d3bfea04409.md",
"repo_name": "orix-software/orix-software.github.io",
"src_encoding": "UTF-8",
"text": "---\ntitle: /mnt/c/Users/plifp/OneDrive/oric/projets/orix-software/vi/src/functions/subfunc/vi\n\n---\n\n# /mnt/c/Users/plifp/OneDrive/oric/projets/orix-software/vi/src/functions/subfunc/vi\n\n\n\n## Files\n\n| Name |\n| -------------- |\n| **[/mnt/c/Users/plifp/OneDrive/oric/projets/orix-software/vi/src/functions/subfunc/vi/vi_add_char_to_text.s](Files/vi__add__char__to__text_8s.md#file-vi-add-char-to-text.s)** |\n| **[/mnt/c/Users/plifp/OneDrive/oric/projets/orix-software/vi/src/functions/subfunc/vi/vi_check_0A.s](Files/vi__check__0A_8s.md#file-vi-check-0a.s)** |\n| **[/mnt/c/Users/plifp/OneDrive/oric/projets/orix-software/vi/src/functions/subfunc/vi/vi_check_beginning_of_file.s](Files/vi__check__beginning__of__file_8s.md#file-vi-check-beginning-of-file.s)** |\n| **[/mnt/c/Users/plifp/OneDrive/oric/projets/orix-software/vi/src/functions/subfunc/vi/vi_check_eof.s](Files/vi__check__eof_8s.md#file-vi-check-eof.s)** |\n| **[/mnt/c/Users/plifp/OneDrive/oric/projets/orix-software/vi/src/functions/subfunc/vi/vi_check_if_previous_line_was_truncated.s](Files/vi__check__if__previous__line__was__truncated_8s.md#file-vi-check-if-previous-line-was-truncated.s)** |\n| **[/mnt/c/Users/plifp/OneDrive/oric/projets/orix-software/vi/src/functions/subfunc/vi/vi_check_inserted_char_overflow_the_max_column.s](Files/vi__check__inserted__char__overflow__the__max__column_8s.md#file-vi-check-inserted-char-overflow-the-max-column.s)** |\n| **[/mnt/c/Users/plifp/OneDrive/oric/projets/orix-software/vi/src/functions/subfunc/vi/vi_clear_command_line.s](Files/vi__clear__command__line_8s.md#file-vi-clear-command-line.s)** |\n| **[/mnt/c/Users/plifp/OneDrive/oric/projets/orix-software/vi/src/functions/subfunc/vi/vi_compute_last_text_line.s](Files/vi__compute__last__text__line_8s.md#file-vi-compute-last-text-line.s)** |\n| **[/mnt/c/Users/plifp/OneDrive/oric/projets/orix-software/vi/src/functions/subfunc/vi/vi_compute_video_adress.s](Files/vi__compute__video__adress_8s.md#file-vi-compute-video-adress.s)** |\n| **[/mnt/c/Users/plifp/OneDrive/oric/projets/orix-software/vi/src/functions/subfunc/vi/vi_copy_arg1_to_name_file_open.s](Files/vi__copy__arg1__to__name__file__open_8s.md#file-vi-copy-arg1-to-name-file-open.s)** |\n| **[/mnt/c/Users/plifp/OneDrive/oric/projets/orix-software/vi/src/functions/subfunc/vi/vi_decal_text.s](Files/vi__decal__text_8s.md#file-vi-decal-text.s)** |\n| **[/mnt/c/Users/plifp/OneDrive/oric/projets/orix-software/vi/src/functions/subfunc/vi/vi_display_char.s](Files/vi__display__char_8s.md#file-vi-display-char.s)** |\n| **[/mnt/c/Users/plifp/OneDrive/oric/projets/orix-software/vi/src/functions/subfunc/vi/vi_display_file_opened.s](Files/vi__display__file__opened_8s.md#file-vi-display-file-opened.s)** |\n| **[/mnt/c/Users/plifp/OneDrive/oric/projets/orix-software/vi/src/functions/subfunc/vi/vi_fill_last_line.s](Files/vi__fill__last__line_8s.md#file-vi-fill-last-line.s)** |\n| **[/mnt/c/Users/plifp/OneDrive/oric/projets/orix-software/vi/src/functions/subfunc/vi/vi_init_vi_struct.s](Files/vi__init__vi__struct_8s.md#file-vi-init-vi-struct.s)** |\n| **[/mnt/c/Users/plifp/OneDrive/oric/projets/orix-software/vi/src/functions/subfunc/vi/vi_key_del.s](Files/vi__key__del_8s.md#file-vi-key-del.s)** |\n| **[/mnt/c/Users/plifp/OneDrive/oric/projets/orix-software/vi/src/functions/subfunc/vi/vi_key_down.s](Files/vi__key__down_8s.md#file-vi-key-down.s)** |\n| **[/mnt/c/Users/plifp/OneDrive/oric/projets/orix-software/vi/src/functions/subfunc/vi/vi_key_enter.s](Files/vi__key__enter_8s.md#file-vi-key-enter.s)** |\n| **[/mnt/c/Users/plifp/OneDrive/oric/projets/orix-software/vi/src/functions/subfunc/vi/vi_key_left.s](Files/vi__key__left_8s.md#file-vi-key-left.s)** |\n| **[/mnt/c/Users/plifp/OneDrive/oric/projets/orix-software/vi/src/functions/subfunc/vi/vi_key_right.s](Files/vi__key__right_8s.md#file-vi-key-right.s)** |\n| **[/mnt/c/Users/plifp/OneDrive/oric/projets/orix-software/vi/src/functions/subfunc/vi/vi_key_up.s](Files/vi__key__up_8s.md#file-vi-key-up.s)** |\n| **[/mnt/c/Users/plifp/OneDrive/oric/projets/orix-software/vi/src/functions/subfunc/vi/vi_length_file_plus_plus.s](Files/vi__length__file__plus__plus_8s.md#file-vi-length-file-plus-plus.s)** |\n| **[/mnt/c/Users/plifp/OneDrive/oric/projets/orix-software/vi/src/functions/subfunc/vi/vi_length_file_sub_sub.s](Files/vi__length__file__sub__sub_8s.md#file-vi-length-file-sub-sub.s)** |\n| **[/mnt/c/Users/plifp/OneDrive/oric/projets/orix-software/vi/src/functions/subfunc/vi/vi_ptr_file_used_plus_plus.s](Files/vi__ptr__file__used__plus__plus_8s.md#file-vi-ptr-file-used-plus-plus.s)** |\n| **[/mnt/c/Users/plifp/OneDrive/oric/projets/orix-software/vi/src/functions/subfunc/vi/vi_ptr_file_used_plus_plus_and_check_eof.s](Files/vi__ptr__file__used__plus__plus__and__check__eof_8s.md#file-vi-ptr-file-used-plus-plus-and-check-eof.s)** |\n| **[/mnt/c/Users/plifp/OneDrive/oric/projets/orix-software/vi/src/functions/subfunc/vi/vi_ptr_file_used_sub_sub.s](Files/vi__ptr__file__used__sub__sub_8s.md#file-vi-ptr-file-used-sub-sub.s)** |\n| **[/mnt/c/Users/plifp/OneDrive/oric/projets/orix-software/vi/src/functions/subfunc/vi/vi_ptr_last_char_add.s](Files/vi__ptr__last__char__add_8s.md#file-vi-ptr-last-char-add.s)** |\n| **[/mnt/c/Users/plifp/OneDrive/oric/projets/orix-software/vi/src/functions/subfunc/vi/vi_ptr_last_char_plus_plus.s](Files/vi__ptr__last__char__plus__plus_8s.md#file-vi-ptr-last-char-plus-plus.s)** |\n| **[/mnt/c/Users/plifp/OneDrive/oric/projets/orix-software/vi/src/functions/subfunc/vi/vi_ptr_last_char_sub_sub.s](Files/vi__ptr__last__char__sub__sub_8s.md#file-vi-ptr-last-char-sub-sub.s)** |\n| **[/mnt/c/Users/plifp/OneDrive/oric/projets/orix-software/vi/src/functions/subfunc/vi/vi_scroll_from_left_to_right_full_line.s](Files/vi__scroll__from__left__to__right__full__line_8s.md#file-vi-scroll-from-left-to-right-full-line.s)** |\n| **[/mnt/c/Users/plifp/OneDrive/oric/projets/orix-software/vi/src/functions/subfunc/vi/vi_scroll_to_left.s](Files/vi__scroll__to__left_8s.md#file-vi-scroll-to-left.s)** |\n| **[/mnt/c/Users/plifp/OneDrive/oric/projets/orix-software/vi/src/functions/subfunc/vi/vi_search_next_line.s](Files/vi__search__next__line_8s.md#file-vi-search-next-line.s)** |\n| **[/mnt/c/Users/plifp/OneDrive/oric/projets/orix-software/vi/src/functions/subfunc/vi/vi_search_previous_cr.s](Files/vi__search__previous__cr_8s.md#file-vi-search-previous-cr.s)** |\n| **[/mnt/c/Users/plifp/OneDrive/oric/projets/orix-software/vi/src/functions/subfunc/vi/vi_search_previous_line_beginning.s](Files/vi__search__previous__line__beginning_8s.md#file-vi-search-previous-line-beginning.s)** |\n| **[/mnt/c/Users/plifp/OneDrive/oric/projets/orix-software/vi/src/functions/subfunc/vi/vi_set_ptr_last_char.s](Files/vi__set__ptr__last__char_8s.md#file-vi-set-ptr-last-char.s)** |\n| **[/mnt/c/Users/plifp/OneDrive/oric/projets/orix-software/vi/src/functions/subfunc/vi/vi_set_xpos_0.s](Files/vi__set__xpos__0_8s.md#file-vi-set-xpos-0.s)** |\n| **[/mnt/c/Users/plifp/OneDrive/oric/projets/orix-software/vi/src/functions/subfunc/vi/vi_set_xpos_from_A.s](Files/vi__set__xpos__from__A_8s.md#file-vi-set-xpos-from-a.s)** |\n| **[/mnt/c/Users/plifp/OneDrive/oric/projets/orix-software/vi/src/functions/subfunc/vi/vi_shift_file_from_memory_one_char.s](Files/vi__shift__file__from__memory__one__char_8s.md#file-vi-shift-file-from-memory-one-char.s)** |\n| **[/mnt/c/Users/plifp/OneDrive/oric/projets/orix-software/vi/src/functions/subfunc/vi/vi_shift_line_left_to_right_editor.s](Files/vi__shift__line__left__to__right__editor_8s.md#file-vi-shift-line-left-to-right-editor.s)** |\n| **[/mnt/c/Users/plifp/OneDrive/oric/projets/orix-software/vi/src/functions/subfunc/vi/vi_strlen_current_line.s](Files/vi__strlen__current__line_8s.md#file-vi-strlen-current-line.s)** |\n| **[/mnt/c/Users/plifp/OneDrive/oric/projets/orix-software/vi/src/functions/subfunc/vi/vi_vi_ptr_file_used_plus_plus.s](Files/vi__vi__ptr__file__used__plus__plus_8s.md#file-vi-vi-ptr-file-used-plus-plus.s)** |\n| **[/mnt/c/Users/plifp/OneDrive/oric/projets/orix-software/vi/src/functions/subfunc/vi/vi_xpos_screen_plus_plus.s](Files/vi__xpos__screen__plus__plus_8s.md#file-vi-xpos-screen-plus-plus.s)** |\n| **[/mnt/c/Users/plifp/OneDrive/oric/projets/orix-software/vi/src/functions/subfunc/vi/vi_xpos_screen_sub_sub.s](Files/vi__xpos__screen__sub__sub_8s.md#file-vi-xpos-screen-sub-sub.s)** |\n| **[/mnt/c/Users/plifp/OneDrive/oric/projets/orix-software/vi/src/functions/subfunc/vi/vi_ypos_screen_plus_plus.s](Files/vi__ypos__screen__plus__plus_8s.md#file-vi-ypos-screen-plus-plus.s)** |\n| **[/mnt/c/Users/plifp/OneDrive/oric/projets/orix-software/vi/src/functions/subfunc/vi/vi_ypos_screen_sub_sub.s](Files/vi__ypos__screen__sub__sub_8s.md#file-vi-ypos-screen-sub-sub.s)** |\n\n\n\n\n\n\n-------------------------------\n\nUpdated on 2022-12-15 at 11:48:27 +0100\n"
},
{
"alpha_fraction": 0.260869562625885,
"alphanum_fraction": 0.417391300201416,
"avg_line_length": 6.1875,
"blob_id": "09074ebd564a99717171daeab05b86a4b7275d9d",
"content_id": "e918bb5ddbec47b429413acb598856253d31ed06",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Markdown",
"length_bytes": 115,
"license_type": "no_license",
"max_line_length": 39,
"num_lines": 16,
"path": "/doxygen/doxybook_output/index_groups.md",
"repo_name": "orix-software/orix-software.github.io",
"src_encoding": "UTF-8",
"text": "---\ntitle: Modules\n\n---\n\n# Modules\n\n\n\n\n\n\n\n-------------------------------\n\nUpdated on 2022-12-15 at 11:48:27 +0100\n"
},
{
"alpha_fraction": 0.6818181872367859,
"alphanum_fraction": 0.7196969985961914,
"avg_line_length": 21,
"blob_id": "403d0f7883dc54319a7c3ff3ce98f6fd502a133f",
"content_id": "c6aebee4f64dad5bd264e48fb42f72e46289ac65",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Markdown",
"length_bytes": 792,
"license_type": "no_license",
"max_line_length": 181,
"num_lines": 36,
"path": "/docs/update/2022_4.md",
"repo_name": "orix-software/orix-software.github.io",
"src_encoding": "UTF-8",
"text": "# v2022.4 and v2022.4.1\n\n## Kernel\n\n* Kernel supports relocation mode binary\n* fixbug in multiples files opened\n\n## CC65 (telestrat target)\n\n* binary from cc65 will be deprecated (without relocbin tool in order to convert it in relocation mode). It needs to get relocbin from orix-sdk : https://github.com/assinie/orix-sdk\n\n## Shell\n\n* cp now works\n* many man pages had been improved\n* bugfix with \"cd /\" which did not work\n\n## How to upgrade\n\n* Download https://repo.orix.oric.org/dists/official/tgz/6502/sdcard.tgz\n\n* gunzip/untar sdcard.tgz on the twilighte board device\n* type in the orix shell :\n - cd /usr/share/carts/2022.4\n\nif the default device is a usb key\n\n```bash\n/# orixcfg -r -s 4 kernelus.r64\n```\n\nif the default device is a sdcard\n\n```bash\n/# orixcfg -r -s 4 kernelsd.r64\n```\n"
},
{
"alpha_fraction": 0.542185366153717,
"alphanum_fraction": 0.5684647560119629,
"avg_line_length": 16.634145736694336,
"blob_id": "a7fe2e0b39346332eccf18cdf75802cab8cb4ee8",
"content_id": "1f92a746b411329039280bf876b56dea23268ae1",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Markdown",
"length_bytes": 723,
"license_type": "no_license",
"max_line_length": 110,
"num_lines": 41,
"path": "/doxygen/doxybook_output/Files/vi__set__xpos__from__A_8s.md",
"repo_name": "orix-software/orix-software.github.io",
"src_encoding": "UTF-8",
"text": "---\ntitle: /mnt/c/Users/plifp/OneDrive/oric/projets/orix-software/vi/src/functions/subfunc/vi/vi_set_xpos_from_A.s\n\n---\n\n# /mnt/c/Users/plifp/OneDrive/oric/projets/orix-software/vi/src/functions/subfunc/vi/vi_set_xpos_from_A.s\n\n\n\n## Functions\n\n| | Name |\n| -------------- | -------------- |\n| | **[vi_set_xpos_from_A](Files/vi__set__xpos__from__A_8s.md#function-vi-set-xpos-from-a)**() |\n\n\n## Functions Documentation\n\n### function vi_set_xpos_from_A\n\n```cpp\nvi_set_xpos_from_A()\n```\n\n\n\n\n## Source code\n\n```cpp\n.proc vi_set_xpos_from_A\n ldy #vi_struct_data::xpos_screen\n sta (vi_struct),y\n rts\n.endproc\n```\n\n\n-------------------------------\n\nUpdated on 2022-12-15 at 11:48:27 +0100\n"
},
{
"alpha_fraction": 0.5362537503242493,
"alphanum_fraction": 0.5709969997406006,
"avg_line_length": 15.974358558654785,
"blob_id": "fa24bb37bdb931c64d19fafa0242b56d99ef4a6e",
"content_id": "acd251bce5af7a56e367dfd58758e304aba0faf7",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Markdown",
"length_bytes": 662,
"license_type": "no_license",
"max_line_length": 163,
"num_lines": 39,
"path": "/doxygen/doxybook_output_vi/Files/vi__ptr__file__used__plus__plus__and__check__eof_8s.md",
"repo_name": "orix-software/orix-software.github.io",
"src_encoding": "UTF-8",
"text": "---\ntitle: vi_ptr_file_used_plus_plus_and_check_eof.s\n\n---\n\n# vi_ptr_file_used_plus_plus_and_check_eof.s\n\n\n\n## Routine\n\n| | Name |\n| -------------- | -------------- |\n| | **[vi_ptr_file_used_plus_plus_and_check_eof](Files/vi__ptr__file__used__plus__plus__and__check__eof_8s.md#Routine-vi-ptr-file-used-plus-plus-and-check-eof)** |\n\n\n## Routine documentation\n\n### Routine vi_ptr_file_used_plus_plus_and_check_eof\n\n```ca65\nvi_ptr_file_used_plus_plus_and_check_eof\n```\n\n\n\n\n## Source code\n\n```ca65\n.proc vi_ptr_file_used_plus_plus_and_check_eof\n rts\n.endproc\n```\n\n\n-------------------------------\n\nUpdated on 2022-12-15 at 14:20:17 +0100\n"
},
{
"alpha_fraction": 0.5010941028594971,
"alphanum_fraction": 0.5251641273498535,
"avg_line_length": 15.518072128295898,
"blob_id": "f76960cce9de58369cc79ff15f2c5b31a1d5760e",
"content_id": "be3d4575a585ae4f202cb53b9fbf90656ed2e433",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Markdown",
"length_bytes": 1371,
"license_type": "no_license",
"max_line_length": 70,
"num_lines": 83,
"path": "/doxygen/doxybook_output_vi/Files/vi__key__left_8s.md",
"repo_name": "orix-software/orix-software.github.io",
"src_encoding": "UTF-8",
"text": "---\ntitle: vi_key_left.s\n\n---\n\n# vi_key_left.s\n\n\n\n## Routine\n\n| | Name |\n| -------------- | -------------- |\n| | **[vi_key_left](Files/vi__key__left_8s.md#Routine-vi-key-left)** |\n\n\n## Routine documentation\n\n### Routine vi_key_left\n\n```ca65\nvi_key_left\n```\n\n\n\n\n## Source code\n\n```ca65\n.proc vi_key_left\n jsr vi_editor_switch_off_cursor\n\n jsr vi_xpos_screen_sub_sub\n cmp #IS_BEGINNING_OF_THE_LINE\n beq @exit_and_check ; x_equal_to_zero\n\n jsr vi_ptr_file_used_sub_sub\n\n@exit:\n rts\n\n@exit_and_check:\n\n ; At this step we are on the column 0 (xpos=0)\n\n jsr vi_check_beginning_of_file\n cmp #$00\n beq @exit\n\n ; Check if the previous char is a crlf\n\n\n ; if posfile[vi_ptr_file_used]=$0A goto @exit_add\n jsr vi_ptr_file_used_sub_sub\n ldy #$00\n lda (vi_ptr_file_used),y\n cmp #LF\n beq @exit_add ; LF exit\n\n ; if posfile[vi_ptr_file_used]=$0D goto @exit_add\n ldy #$00\n lda (vi_ptr_file_used),y\n cmp #CR\n beq @exit_add ; CR ?exit\n\n jsr vi_ypos_screen_sub_sub\n\n ldy #vi_struct_data::xpos_screen\n lda #VI_EDITOR_MAX_COLUMN\n sta (vi_struct),y\n\n rts\n@exit_add:\n jsr vi_ptr_file_used_plus_plus\n rts\n.endproc\n```\n\n\n-------------------------------\n\nUpdated on 2022-12-15 at 14:20:17 +0100\n"
},
{
"alpha_fraction": 0.5071428418159485,
"alphanum_fraction": 0.5249999761581421,
"avg_line_length": 9.199999809265137,
"blob_id": "de2bf1a6ecf294f63fda1ebf155861582bc1a5dd",
"content_id": "dee29cd58536176ec8391d03664ab42a87530068",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Markdown",
"length_bytes": 280,
"license_type": "no_license",
"max_line_length": 41,
"num_lines": 25,
"path": "/kernel/docs/primitives/xputcwd.md",
"repo_name": "orix-software/orix-software.github.io",
"src_encoding": "UTF-8",
"text": "# XPUTCWD (chdir)\r\n\r\n## Description\r\n\r\nChange current process directory\r\n\r\n## Input\r\n\r\nA & Y the 16 bits values (string with \\0)\r\n\r\n## Output\r\n\r\nN/A\r\n\r\n## Example\r\n\r\n``` ca65\r\n\tlda #<str\r\n\tldy #>str\r\n BRK_KERNEL XPUTCWD\r\n rts\r\nstr:\r\n .asciiz \"/etc\"\r\n\r\n```\r\n"
},
{
"alpha_fraction": 0.5784586668014526,
"alphanum_fraction": 0.6035283207893372,
"avg_line_length": 18.581817626953125,
"blob_id": "354d691e05e3c363069c3965d418a1571610f588",
"content_id": "9ade1de4dc22015d16b1a8748b89ce722b2d0a3a",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Markdown",
"length_bytes": 1077,
"license_type": "no_license",
"max_line_length": 120,
"num_lines": 55,
"path": "/doxygen/doxybook_output/Files/vi__check__beginning__of__file_8s.md",
"repo_name": "orix-software/orix-software.github.io",
"src_encoding": "UTF-8",
"text": "---\ntitle: /mnt/c/Users/plifp/OneDrive/oric/projets/orix-software/vi/src/functions/subfunc/vi/vi_check_beginning_of_file.s\n\n---\n\n# /mnt/c/Users/plifp/OneDrive/oric/projets/orix-software/vi/src/functions/subfunc/vi/vi_check_beginning_of_file.s\n\n\n\n## Functions\n\n| | Name |\n| -------------- | -------------- |\n| | **[vi_check_beginning_of_file](Files/vi__check__beginning__of__file_8s.md#function-vi-check-beginning-of-file)**() |\n\n\n## Functions Documentation\n\n### function vi_check_beginning_of_file\n\n```cpp\nvi_check_beginning_of_file()\n```\n\n\n\n\n## Source code\n\n```cpp\n; A returns 00 if beginning of the file, 01 if not\n\n.proc vi_check_beginning_of_file\n lda vi_ptr_file_used\n cmp vi_ptr_file\n bne @not_beginning\n\n lda vi_ptr_file_used+1\n cmp vi_ptr_file+1\n bne @not_beginning\n\n lda #IS_BEGINNING_OF_THE_FILE ; Beginninng of the file\n rts\n\n@not_beginning:\n lda #$01 ; Not the Beginninng of the file\n rts\n\n.endproc\n```\n\n\n-------------------------------\n\nUpdated on 2022-12-15 at 11:48:27 +0100\n"
},
{
"alpha_fraction": 0.5833333134651184,
"alphanum_fraction": 0.59375,
"avg_line_length": 11.241379737854004,
"blob_id": "40105d18d21942948ce6751dc2b9474fa244f2f9",
"content_id": "ff9035ff8238ebd55dee27eb7b8971683298d4f0",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Markdown",
"length_bytes": 384,
"license_type": "no_license",
"max_line_length": 62,
"num_lines": 29,
"path": "/docs/developer_manual/orixsdk_macros/cgetc.md",
"repo_name": "orix-software/orix-software.github.io",
"src_encoding": "UTF-8",
"text": "# cgetc\r\n\r\n## Description\r\n\r\nRead the keyboard\r\n\r\n## usage\r\n\r\n* cgetc\r\n* cgetc var\r\n\r\nnote:\r\n Keycode in A register and var if provided\r\n\r\ncgetc var\r\n\r\n## example\r\n\r\n```ca65\r\n .include \"telestrat.inc\"\r\n .include \"../orix-sdk/macros/SDK_conio.mac\"\r\n\r\n cgetc\r\n ; A contains the key pressed\r\n rts\r\n\r\n```\r\n\r\nCall [XRD0](../../../kernel/primitives/XRD0/) kernel function.\r\n"
},
{
"alpha_fraction": 0.5070422291755676,
"alphanum_fraction": 0.5408450961112976,
"avg_line_length": 14.106383323669434,
"blob_id": "6eeb16fc7af0b5813bbec7eedb92fa8af979fb8f",
"content_id": "60de8e3b06eacd783e67e25e2f0a78902cc900e0",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Markdown",
"length_bytes": 710,
"license_type": "no_license",
"max_line_length": 107,
"num_lines": 47,
"path": "/docs/tools_docs/vi/Files/vi__compute__video__adress_8s.md",
"repo_name": "orix-software/orix-software.github.io",
"src_encoding": "UTF-8",
"text": "---\ntitle: vi_compute_video_adress.s\n\n---\n\n# vi_compute_video_adress.s\n\n\n\n## Routine\n\n| | Name |\n| -------------- | -------------- |\n| | **[vi_compute_video_adress](Files/vi__compute__video__adress_8s.md#Routine-vi-compute-video-adress)** |\n\n\n## Routine documentation\n\n### Routine vi_compute_video_adress\n\n```ca65\nvi_compute_video_adress\n```\n\n\n\n\n## Source code\n\n```ca65\n\n.proc vi_compute_video_adress\n ldy #vi_struct_data::ypos_screen\n lda (vi_struct),y\n tay\n lda TABLE_LOW_TEXT,y\n sta vi_ptr_screen\n lda TABLE_HIGH_TEXT,y\n sta vi_ptr_screen+1\n rts\n.endproc\n```\n\n\n-------------------------------\n\nUpdated on 2022-12-15 at 14:20:17 +0100\n"
},
{
"alpha_fraction": 0.6492595672607422,
"alphanum_fraction": 0.670304000377655,
"avg_line_length": 18.439393997192383,
"blob_id": "dcaeaf95159dc5a9f092bc6677a707df6c212861",
"content_id": "4ff74f1097adea9d4b97212e19f6b51423807ec2",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Markdown",
"length_bytes": 1283,
"license_type": "no_license",
"max_line_length": 150,
"num_lines": 66,
"path": "/docs/kernel/primitives/xmainargs.md",
"repo_name": "orix-software/orix-software.github.io",
"src_encoding": "UTF-8",
"text": "# XMAINARGS (argmains)\n\nID primitive : $2C\n\n## Description\n\nReturn argc and argv or a ptr with a content of the command line\n\n## Input\n\nA => mode\n\nmode 0 : return a struct of args\n\nmode 1 : return the string of the command line (ptr)\n\n## Output\n\nMode 0 :\n\n* A & Y :contains ptr to XMAINARGS Struct\n* X: number of args\n\nMode 1 :\n\n* A & Y : a ptr to a new malloc which contains commandline\n\n## Example\n\nMode 0 : parse command line and build argc/argv\n\n```ca65\n .include \"telestrat.inc\"\n\n XMAINARGS = $2C\n\n lda #$00 ; Mode 0\n BRK_TELEMON XMAINARGS\n\n stx save_argc\n sta save_argv\n sty save_argv+1\n rts\n```\n\nMode 1 : return command line\n\n```ca65\n .include \"telestrat.inc\"\n XMAINARGS = $2C\n\n lda #$01 ; Mode 0\n BRK_TELEMON XMAINARGS\n\n sta ptr_cmd\n sty ptr_cmd+1 ; ptr_cmd contains the ptr to the command line. It allocates a string, it needs to be free when the program does not need it anymore\n rts\n```\n\n!!! tip \"See [initmainargs](../../developer_manual/orixsdk_macros/initmainargs/) macro from orix-sdk to use it \"\n\n!!! warning \"XMAINARGS allocates a chunk in memory, it must be free at the end of the use of the parameters\"\n\n!!! warning \"Mode 0 is available since v2022.4\"\n\n!!! fail \"XGETARGV does not handle \"\" yet and \"\\ in parameters\"\n"
},
{
"alpha_fraction": 0.7213488817214966,
"alphanum_fraction": 0.7352941036224365,
"avg_line_length": 55.159420013427734,
"blob_id": "3b2b00cebbd3b18d425bee152aa52927a044ed09",
"content_id": "f507349920f10033fdcbf11f4a254bb7d78b10f4",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Markdown",
"length_bytes": 3944,
"license_type": "no_license",
"max_line_length": 418,
"num_lines": 69,
"path": "/docs/developer_manual/buildrom.md",
"repo_name": "orix-software/orix-software.github.io",
"src_encoding": "UTF-8",
"text": "# Build an Orix rom\r\n\r\n## Guideline\r\n\r\n* In order to have commands available from shell, some rules must be set in the bank structure ([https://github.com/orix-software/empty-rom/](https://github.com/orix-software/empty-rom/)). Bank structure must be used, or commands won't be accessed from the kernel or commands (As [help](/commands/help/) -b XX)\r\n\r\n* A ROM will be in an ROM slot or a RAM slot.\r\n\r\nRam slot is useful if some values must be set internaly (and it avoid malloc use, but it's not the way a ROM should not done if the process can be launched twice at the same time).\r\n\r\nThe twilighte board had been released without the sram saved with a battery, there is a lot more twilighte board in the workd without SRAM saved by battery than with a battery. In that case, if a .rom is coded with the behavior to keep information into it's bank slot (like configuration), it will lost (and the content of the rom) when oric is switch off.\r\n\r\n!!! warning \"Since shell 2022.4, it's possible to load roms with a shortcut from a config file in /etc\"\r\n\r\nSome existing roms are in a eeprom slot (kernel, shell, monitor, forth ..). Systemd rom is a rom loaded into RAM bank.\r\n\r\n* C language can be used for bank, but the 16KB of a bank is quickly filled with C code.\r\n\r\n* [orix-sdk](/home/orixsdk/) must be intensively used, and if it can't be used, kernel primitives must be used. If a universal feature is missing in kernel, it's better to insert into kernel than develop only for the ROM.\r\n\r\n* Each rom contains 1 or X commands.\r\n\r\n* In order to have multitasking in the future, commands must use malloc and free from kernel (and file operations from the kernel)\r\n\r\n* commands must use zero page from $80 to $80+26 because kernel saves 26 bytes from $80 to $80+26 in order to swap processes. Address $80 to $80+26 are reserved for binary functionnality. It's also possible to use RES and RESB (used in kernel primitive), if you don't call kernel primitives, or else, it well be erased in each kernel primitive call\r\n\r\n* limit for a rom is 16KB. But, it's maybe enough because many operations are done with kernel calls (fopen for example).\r\n\r\n* Kernel is always **available** (Bank 7) when \"set\" banking is RAM or EEPROM (kernel is always available during swapping banks with twilighte banking register ($343)).\r\n\r\n## Bank Structure\r\n\r\nIf you want to build an orix compatible rom, a special format is used for Orix bank.\r\n\r\nA rom template can be downloaded here : [https://github.com/orix-software/empty-rom/](https://github.com/orix-software/empty-rom/)\r\n\r\n### Rom definition\r\n\r\n* $fff0 : 1 byte, rom type (Value 0 : empty bank, value 1 : command bank)\r\n\r\n## Zero page\r\n\r\nuserzp is equal to $8C. It's a range from $8C to $8C+16 which is saved for each process, when a process is forked\r\n\r\nKernel uses some address in zero page. But userzp to userzp+16 offset are reserved for binary, Orix banks. Address below userzp can be used, but kernel could erase it when IRQ or Kernels calls are performed\r\n\r\nKernel saves 16 bytes (zeropage) when a binary is forked. It means that, a [XEXEC](../../kernel/primitives/xexec) calls in a binary, kernel will save PPID offset from userzp to userzp+16, and kernel will restore theses offsets when the PID has finished. In the future, multitasking will work in the same ways, and there is no guarantee that a binary will have its values restores when offset are greater than userzp+16\r\n\r\n## Launch the ROM\r\n\r\n### Oricutron\r\n\r\nIf it's a rom which must be in a ram slot, the config file (twilighte.cfg) must be set it in in bankram section or else insert it in bankrom section.\r\n\r\n### Real\r\n\r\nRom can be loaded on eeprom easily, it will erase 4 banks :\r\n\r\n``` bash\r\n/#orixcfg -r -s 0 myrom.rom\r\n```\r\n\r\nRom must be loaded into ram slot, it will erase only one bank :\r\n\r\n``` bash\r\n/#orixcfg -w -s 0 -b 4 myrom.rom\r\n```\r\n\r\nAt this step if everything is OK, each commands from the bank can be accessed from shell.\r\n"
},
{
"alpha_fraction": 0.4976525902748108,
"alphanum_fraction": 0.5070422291755676,
"avg_line_length": 10.529411315917969,
"blob_id": "e59638457ab7d04f613896bb74ee0898705b18ce",
"content_id": "2cb7f772d7c753505ddd40dcc39f3cf258bdd498",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Markdown",
"length_bytes": 213,
"license_type": "no_license",
"max_line_length": 51,
"num_lines": 17,
"path": "/docs/kernel/primitives/xhexa.md",
"repo_name": "orix-software/orix-software.github.io",
"src_encoding": "UTF-8",
"text": "# XHEXA\r\n\r\n## Usage\r\n\r\nConvert a number in A and returns hexadecimal to AY\r\n\r\n## Example\r\n\r\n```ca65\r\n .include \"telestrat.inc\"\r\n\r\n lda #$FF\r\n BRK_KERNEL XHEXA\r\n ; A = 'F'\r\n ; Y = 'F'\r\n rts\r\n```\r\n"
},
{
"alpha_fraction": 0.523809552192688,
"alphanum_fraction": 0.5646258592605591,
"avg_line_length": 14.55555534362793,
"blob_id": "2a27aa761946b056a08174699071590de2354f9a",
"content_id": "ff79bcc8b9cfd076d13b5b6b7d2b5556c1dc60ef",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Markdown",
"length_bytes": 147,
"license_type": "no_license",
"max_line_length": 39,
"num_lines": 9,
"path": "/kernel/docs/primitives/xscroh.md",
"repo_name": "orix-software/orix-software.github.io",
"src_encoding": "UTF-8",
"text": "# XSCROH (Scrollup text mode)\r\n\r\nscroll all lines from bottom to the top\r\n\r\n```ca65 \r\n ldx #$01\r\n ldy #26\r\n BRK_KERNEL XSCROH\r\n```"
},
{
"alpha_fraction": 0.3406265676021576,
"alphanum_fraction": 0.49477872252464294,
"avg_line_length": 16.95535659790039,
"blob_id": "23cd7d2bf390dbb94c82616a09e09acb9049dbe9",
"content_id": "2d6a3320c4a80de5ad29119a4f8dcfde13c894f2",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Markdown",
"length_bytes": 2011,
"license_type": "no_license",
"max_line_length": 87,
"num_lines": 112,
"path": "/doxygen/doxybook_output/Files/tables_8s.md",
"repo_name": "orix-software/orix-software.github.io",
"src_encoding": "UTF-8",
"text": "---\ntitle: /mnt/c/Users/plifp/OneDrive/oric/projets/orix-software/vi/src/functions/tables.s\n\n---\n\n# /mnt/c/Users/plifp/OneDrive/oric/projets/orix-software/vi/src/functions/tables.s\n\n\n\n## Attributes\n\n| | Name |\n| -------------- | -------------- |\n| char | **[TABLE_LOW_TEXT](Files/tables_8s.md#variable-table-low-text)** |\n| char | **[TABLE_HIGH_TEXT](Files/tables_8s.md#variable-table-high-text)** |\n\n\n\n## Attributes Documentation\n\n### variable TABLE_LOW_TEXT\n\n```cpp\nchar TABLE_LOW_TEXT;\n```\n\n\n### variable TABLE_HIGH_TEXT\n\n```cpp\nchar TABLE_HIGH_TEXT;\n```\n\n\n\n## Source code\n\n```cpp\nTABLE_LOW_TEXT:\n;0\n .byt <($bb80)\n .byt <($bb80+40)\n .byt <($bb80+80)\n .byt <($bb80+120)\n .byt <($bb80+160)\n .byt <($bb80+200)\n .byt <($bb80+240)\n .byt <($bb80+280)\n .byt <($bb80+320)\n .byt <($bb80+360)\n .byt <($bb80+400)\n .byt <($bb80+440)\n .byt <($bb80+480)\n .byt <($bb80+520)\n .byt <($bb80+560)\n .byt <($bb80+600)\n .byt <($bb80+640)\n .byt <($bb80+680)\n .byt <($bb80+720)\n .byt <($bb80+760)\n .byt <($bb80+800)\n .byt <($bb80+840)\n .byt <($bb80+880)\n .byt <($bb80+920)\n .byt <($bb80+960)\n .byt <($bb80+1000)\n .byt <($bb80+1040)\n .byt <($bb80+1080)\n\n\nTABLE_HIGH_TEXT:\n .byt >($bb80)\n .byt >($bb80+40)\n .byt >($bb80+80)\n .byt >($bb80+120)\n .byt >($bb80+160)\n ;5\n .byt >($bb80+200)\n .byt >($bb80+240)\n .byt >($bb80+280)\n .byt >($bb80+320)\n .byt >($bb80+360)\n ;10\n .byt >($bb80+400)\n .byt >($bb80+440)\n .byt >($bb80+480)\n .byt >($bb80+520)\n .byt >($bb80+560)\n ;15\n .byt >($bb80+600)\n .byt >($bb80+640)\n .byt >($bb80+680)\n .byt >($bb80+720)\n .byt >($bb80+760)\n ;20\n .byt >($bb80+800)\n .byt >($bb80+840)\n .byt >($bb80+880)\n .byt >($bb80+920)\n .byt >($bb80+960)\n ;25\n .byt >($bb80+1000)\n ; 26\n .byt >($bb80+1040)\n ; 27\n .byt >($bb80+1080)\n```\n\n\n-------------------------------\n\nUpdated on 2022-12-15 at 11:48:27 +0100\n"
},
{
"alpha_fraction": 0.33429309725761414,
"alphanum_fraction": 0.34824657440185547,
"avg_line_length": 29.78636360168457,
"blob_id": "93221a5fe439c2a68197c74b8fb5ddea937f85af",
"content_id": "3ac0e081cc39ef7f3d8bf53f793c29d86ef040b0",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 13583,
"license_type": "no_license",
"max_line_length": 97,
"num_lines": 440,
"path": "/doxygen/ca65doxy.py",
"repo_name": "orix-software/orix-software.github.io",
"src_encoding": "UTF-8",
"text": "#!/usr/bin/python\n# -*- coding: utf8 -*-\n\nfrom __future__ import print_function\n\nfrom pprint import pprint\n\nimport fileinput\n\n#\n# Configuration\n#\nvar_types = {\n '.addr': 'unsigned int',\n '.byte': 'char',\n '.byt': 'char',\n '.word': 'int',\n '.dword': 'double',\n '.res': 'char',\n '.asciiz': 'const char'\n }\n\nextern_is_fn = True\n#macro_dcl = 'inline macro'\nmacro_dcl = '#define'\n# equ_dcl = '#define'\nequ_dcl = 'const char'\n\n#\n# Pas de modifications au delà de cette limite\n#\ndef_bloc = False\ndef_cbloc = False\n\n# TESTS\ndef_chrisbloc = False\n\ndef_struct = False\nstruct_name = ''\ndef_proc = False\nproc_name = ''\ndef_macro = False\n\n\nlast_label = ''\n\nfor line in fileinput.input():\n\n line = line.rstrip()\n\n #if fileinput.isfirstline():\n # fname = fileinput.filename()\n # print('\\n'.join(['/**', ' * @file '+fname, '*/']))\n\n if line:\n inst = line.split()\n nb_inst = len(inst)\n\n line_out = ''\n\n # pprint(inst)\n\n if def_bloc:\n if inst[0] == ';;':\n def_bloc = False\n line_out = '*/'\n\n else:\n line_out = line[1:]\n\n # Au cas où il manque un ' ' entre le ';' et le commentaire\n #if len(inst[0]) > 1 and inst[0][1] == ';':\n # line_out += inst[0][1:]\n # print('----', line)\n\n #if nb_inst > 1:\n # line_out += ' '.join(inst[1:])\n\n elif def_cbloc:\n line_out = line\n\n if inst[0] == '*/':\n def_cbloc = False\n\n elif def_chrisbloc:\n line_out = line\n\n if len(line) > 1:\n line_out = line[1:]\n\n if inst[0] == ';' and nb_inst > 1 and inst[1][:2] == '==':\n def_chrisbloc = False\n line_out = '</pre> */'\n\n elif def_proc:\n if inst[0] == '.endproc':\n def_proc = False\n\n proc_name = ''\n line_out = '};'\n\n # Appel à une fonction\n elif inst[0].lower() == 'jsr':\n line_out = inst[1]+'();'\n\n # [--- TEST\n # Déclaration d'une zone mémoire\n elif last_label and inst[0] in var_types:\n if inst[0] in ['.byte', '.byt']:\n\n line_out = var_types['.byte']+' '+last_label\n\n var_len = inst[1].count(',')+1\n if var_len > 1:\n line_out += '[%d]' % var_len\n\n line_out += ';'\n\n elif inst[0] == '.word':\n line_out = var_types['.word']+' '+last_label\n\n var_len = inst[1].count(',')+1\n if var_len > 1:\n line_out += '[%d]' % var_len\n\n line_out += ';'\n\n elif inst[0] == '.addr':\n line_out = var_types['.addr']+' '+last_label\n\n var_len = inst[1].count(',')+1\n if var_len > 1:\n line_out += '[%d]' % var_len\n\n line_out += ';'\n\n elif inst[0] == '.dword':\n line_out = var_types['.dword']+' '+last_label\n\n var_len = inst[1].count(',')+1\n if var_len > 1:\n line_out += '[%d]' % var_len\n\n line_out += ';'\n\n elif inst[0] == '.res':\n line_out = var_types['.res']+' %s[%s];' % (last_label, ' '.join(inst[1:]))\n\n elif inst[0] == '.asciiz':\n init = ' '.join(inst[1:])\n # -2 à cause des \"\", +1 à cause du \\00 final\n var_len = len(init)-2+1\n line_out = var_types['.asciiz']+' %s[%s] = %s;' % (last_label, var_len, init)\n\n else:\n line_out = ''\n\n last_label = ''\n\n elif nb_inst == 1:\n # Label?\n # Pas d'espace avant un label\n if not line[0] in [' ', '.', ';', '@', '\\t']:\n if inst[0][-1] == ':':\n last_label = inst[0][:-1]\n line_out = ''\n\n else:\n last_label = inst[0]\n line_out = ''\n # --]\n else:\n # On ne prend rien en compte dans la fonction\n # TODO: Ajouter la prise en charge des variables et les déclarer privées?\n # line_out = '????: '+line\n if line[0] !=';':\n last_label = ''\n\n line_out = ''\n\n elif def_struct:\n if inst[0] == '.endstruct':\n def_struct = False\n\n line_out = '} '+struct_name+';'\n\n else:\n # Traitement des membres de la structure\n if nb_inst >= 2 and inst[1].lower() in var_types:\n inst[1] = inst[1].lower()\n cmnt = ''\n\n if inst[1] in ['.byte', '.byt']:\n line_out = var_types['.byte']+' '+inst[0]+';'\n\n if nb_inst > 3:\n cmnt = ' '.join(inst[3:])\n\n elif inst[1] == '.word':\n line_out = var_types['.word']+' '+inst[0]+';'\n\n if nb_inst > 3:\n cmnt = ' '.join(inst[3:])\n\n elif inst[1] == '.addr':\n line_out = var_types['.addr']+' '+inst[0]+';'\n\n if nb_inst > 3:\n cmnt = ' '.join(inst[3:])\n\n elif inst[1] == '.dword':\n line_out = var_types['.dword']+' '+inst[0]+';'\n\n if nb_inst > 3:\n cmnt = ' '.join(inst[3:])\n\n elif inst[1] == '.res':\n # Vérifier si inst[3] == ';' au cas où?\n\n line_out = var_types['.res']+' %s[%s];' % (inst[0], inst[2])\n\n if nb_inst > 4:\n cmnt = ' '.join(inst[4:])\n\n else:\n line_out = ''\n\n if line_out and cmnt:\n # line_out = '/** '+cmnt+' */ '+line_out\n line_out += '/*!< '+cmnt+' */'\n\n elif def_macro:\n line_out = ''\n if inst[0] == '.endmacro':\n def_macro = False\n\n if macro_dcl !='#define':\n line_out = '};'\n\n else:\n if inst[0] == ';;':\n def_bloc = True\n\n line_out = '/** '\n if nb_inst >=2:\n if inst[1] == '/**':\n inst[1] = ''\n\n line_out += ' '.join(inst[1:])\n\n elif inst[0] in ['/*', '/**']:\n def_cbloc = True\n line_out = line\n\n elif inst[0] == ';' and nb_inst > 1 and inst[1][:2] == '==':\n def_chrisbloc = True\n line_out = '/** <pre>'\n\n elif inst[0] == '.proc':\n def_proc = True\n\n # Ne prend pas en compte un éventuel commentaire sur la ligne .proc\n # TODO: Ajouter un @brief pour le prendre en compte?\n proc_name = inst[1]\n\n line_out = proc_name + '() {'\n\n elif inst[0] == '.struct':\n def_struct = True\n\n # Ne prend pas en compte un éventuel commentaire sur la ligne .proc\n # TODO: Ajouter un @brief pour le prendre en compte?\n struct_name = inst[1]\n\n line_out = 'typedef struct {'\n elif inst[0] == ';@param':\n line_out = '/** @param '+ inst[1] + '*/'\n\n elif inst[0] == '.macro':\n def_macro = True\n\n line_out = macro_dcl + ' ' + inst[1] +'('\n\n if nb_inst > 2:\n if macro_dcl != '#define':\n line_out += 'void ' + ''.join(inst[2:]).replace(',', ', void ')\n else:\n line_out += ' '.join(inst[2:])\n\n line_out += ')'\n if macro_dcl != '#define':\n line_out += ' {'\n\n elif inst[0] == '.define':\n # ATTENTION: un commentaire à la fin de la ligne peut poser problème\n if nb_inst > 3 and inst[3] == ';':\n line_out = '/** ' + ' '.join(inst[4:]) + ' */ '\n line_out += '#define ' + inst[1] + ' ' + inst[2]\n\n else:\n line_out = '#define ' + inst[1] + ' ' + ' '.join(inst[2:])\n\n elif inst[0] == '.include':\n # Pas de prise en compte d'un éventuel commentaire en fin de ligne\n # TODO: A prende en compte?\n line_out = '#include '+inst[1]\n\n elif inst[0] == '.tag':\n line_out = '%s %s;' % (inst[1], last_label)\n\n elif inst[0] == '.import':\n if extern_is_fn:\n line_out = 'extern ' + inst[1] +'();'\n\n else:\n line_out = 'extern ' + inst[1] +';'\n\n\n # Déclaration d'une zone mémoire\n elif last_label and inst[0].lower() in var_types:\n inst[0] = inst[0].lower()\n if inst[0] in ['.byte', '.byt']:\n\n line_out = var_types['.byte']+' '+last_label\n\n var_len = inst[1].count(',')+1\n if var_len > 1:\n line_out += '[%d]' % var_len\n\n line_out += ';'\n\n elif inst[0] == '.word':\n line_out = var_types['.word']+' '+last_label\n\n var_len = inst[1].count(',')+1\n if var_len > 1:\n line_out += '[%d]' % var_len\n\n line_out += ';'\n\n elif inst[0] == '.addr':\n line_out = var_types['.addr']+' '+last_label\n\n var_len = inst[1].count(',')+1\n if var_len > 1:\n line_out += '[%d]' % var_len\n\n line_out += ';'\n\n elif inst[0] == '.dword':\n line_out = var_types['.dword']+' '+last_label\n\n var_len = inst[1].count(',')+1\n if var_len > 1:\n line_out += '[%d]' % var_len\n\n line_out += ';'\n\n elif inst[0] == '.res':\n line_out = var_types['.res']+' %s[%s];' % (last_label, ' '.join(inst[1:]))\n\n elif inst[0] == '.asciiz':\n init = ' '.join(inst[1:])\n # -2 à cause des \"\", +1 à cause du \\00 final\n var_len = len(init)-2+1\n line_out = var_types['.asciiz']+' %s[%s] = %s;' % (last_label, var_len, init)\n\n else:\n line_out = ''\n\n last_label = ''\n\n elif nb_inst == 1:\n # Label?\n # Pas d'espace avant un label\n if not line[0] in [' ', '.', ';', '@', '\\t']:\n if inst[0][-1] == ':':\n last_label = inst[0][:-1]\n line_out = ''\n\n else:\n last_label = inst[0]\n line_out = ''\n\n\n elif nb_inst >= 3:\n # Déclaration d'une variable / label\n if inst[1] in ['=', ':=']:\n if nb_inst > 3 and inst[3] == ';':\n line_out = '/** ' + ' '.join(inst[4:]) + ' */ '\n\n if equ_dcl == '#define':\n line_out += '#define ' + inst[0] + ' ' + inst[2]\n else:\n line_out += equ_dcl + ' ' + inst[0] + ' = ' + inst[2] + ';'\n\n else:\n if equ_dcl == '#define':\n line_out = '#define '+inst[0] + ' ' + ' '.join(inst[2:])\n else:\n line_out = equ_dcl + ' '+inst[0] + '= ' + ' '.join(inst[2:]) + ';'\n\n #elif inst[1] == ':=':\n # if nb_inst > 3 and inst[3] == ';':\n # line_out = '/** ' + ' '.join(inst[4:]) + ' */ '\n # line_out += '#define ' + inst[0] + ' ' + inst[2]\n\n # else:\n # line_out = '#define '+inst[0] + ' ' + ' '.join(inst[2:])\n\n elif inst[1] == ';':\n if inst[0][-1] == ':':\n last_label = inst[0][:-1]\n else:\n last_label = inst[0]\n\n # Ne marche pas, il faudrait connaitre la ligne suivante\n # pour savoir si le commentaire se rapporte à elle\n # line_out = '/** ' + ' '.join(inst[2:]) + ' */'\n\n else:\n line_out = ''\n\n if inst[0][0] not in ['.', ';']:\n last_label = ''\n\n else:\n # line_out = '????: '+line\n if line[0] !=';':\n last_label = ''\n line_out = ''\n\n\n print(line_out)\n\n else:\n print('')\n\nfname = fileinput.filename()\nprint('\\n'.join(['/**', ' * @file '+fname, '*/']))"
},
{
"alpha_fraction": 0.7272727489471436,
"alphanum_fraction": 0.7272727489471436,
"avg_line_length": 11.29411792755127,
"blob_id": "5b716ce644b0cbc5c168a72e2bfa55218236a08d",
"content_id": "4b8233d4f31d5bbd0184ce5fb2447ba08a782620",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Markdown",
"length_bytes": 209,
"license_type": "no_license",
"max_line_length": 75,
"num_lines": 17,
"path": "/docs/commands/ioports.md",
"repo_name": "orix-software/orix-software.github.io",
"src_encoding": "UTF-8",
"text": "# ioports\n\n## Introduction\n\nDisplay I/O Ports of the board\n\n## SYNOPSYS\n\n+ #ioports\n\n## DESCRIPTION\n\nDisplays I/O ports.\n\n## SOURCE\n\nhttps://github.com/orix-software/shell/blob/master/src/commands/ioports.asm\n"
},
{
"alpha_fraction": 0.4747399687767029,
"alphanum_fraction": 0.5096582174301147,
"avg_line_length": 15.023809432983398,
"blob_id": "e634f061a758c3d7111eb42485cda1a33f43406f",
"content_id": "24acff773e6e7d7b11b3f7e4fda6e48fbe482d40",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Markdown",
"length_bytes": 1346,
"license_type": "no_license",
"max_line_length": 89,
"num_lines": 84,
"path": "/doxygen/doxybook_output_vi/Files/vi__fill__last__line_8s.md",
"repo_name": "orix-software/orix-software.github.io",
"src_encoding": "UTF-8",
"text": "---\ntitle: vi_fill_last_line.s\n\n---\n\n# vi_fill_last_line.s\n\n\n\n## Routine\n\n| | Name |\n| -------------- | -------------- |\n| | **[vi_fill_last_line](Files/vi__fill__last__line_8s.md#Routine-vi-fill-last-line)** |\n\n\n## Routine documentation\n\n### Routine vi_fill_last_line\n\n```ca65\nvi_fill_last_line\n```\n\n\n\n\n## Source code\n\n```ca65\n.proc vi_fill_last_line\n\n ; A and Y contains the ptr on the file to display on the last line\n sta vi_ptr1\n sty vi_ptr1+1\n\n ldx #$00\n@compute_empty_line_loop:\n ldy #vi_struct_data::ptr_last_char_file\n lda (vi_struct),y\n sta vi_tmp1\n\n lda vi_ptr1\n cmp vi_tmp1\n bne @not_eof\n\n\n ldy #vi_struct_data::ptr_last_char_file+1\n lda (vi_struct),y\n sta vi_tmp1\n\n lda vi_ptr1+1\n cmp vi_tmp1\n bne @not_eof\n ; reached the end of the file\n rts\n\n@not_eof:\n inc vi_ptr1\n bne @S40\n inc vi_ptr1+1\n\n@S40:\n ldy #$00\n lda (vi_ptr1),y\n cmp #CR\n beq @exit\n cmp #LF\n beq @compute_empty_line_loop\n sta VI_EDITION_LAST_VIDEO_ADRESS,x\n inx\n cpx #VI_EDITOR_MAX_COLUMN+1\n beq @exit\n jmp @compute_empty_line_loop\n\n@exit:\n rts\n.endproc\n```\n\n\n-------------------------------\n\nUpdated on 2022-12-15 at 14:20:17 +0100\n"
},
{
"alpha_fraction": 0.5038167834281921,
"alphanum_fraction": 0.580152690410614,
"avg_line_length": 7.214285850524902,
"blob_id": "f794fe29ae3caa54067b4a99778710decc7809d3",
"content_id": "837f38344d2af2f4691e031a1ef687d01b179bab",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Markdown",
"length_bytes": 133,
"license_type": "no_license",
"max_line_length": 24,
"num_lines": 14,
"path": "/docs/tutorials/advanced_submit_part1.md",
"repo_name": "orix-software/orix-software.github.io",
"src_encoding": "UTF-8",
"text": "# Scripts submit avancés\r\n\r\n## Prérequis\r\n\r\nKernel v2023.2\r\nsubmit 2022.4\r\n\r\n```bash\r\n#!/bin/submit\r\n\r\n\r\n```\r\n\r\n## Restore from\r\n\r\n"
},
{
"alpha_fraction": 0.5583634376525879,
"alphanum_fraction": 0.5824307799339294,
"avg_line_length": 17.065217971801758,
"blob_id": "957b8612bb6cc3f23db3306482b9670ad7da8234",
"content_id": "b6fbd4ec6d432e42f1af4fb365b39da1d5ef723c",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Markdown",
"length_bytes": 831,
"license_type": "no_license",
"max_line_length": 114,
"num_lines": 46,
"path": "/doxygen/doxybook_output/Files/vi__length__file__sub__sub_8s.md",
"repo_name": "orix-software/orix-software.github.io",
"src_encoding": "UTF-8",
"text": "---\ntitle: /mnt/c/Users/plifp/OneDrive/oric/projets/orix-software/vi/src/functions/subfunc/vi/vi_length_file_sub_sub.s\n\n---\n\n# /mnt/c/Users/plifp/OneDrive/oric/projets/orix-software/vi/src/functions/subfunc/vi/vi_length_file_sub_sub.s\n\n\n\n## Functions\n\n| | Name |\n| -------------- | -------------- |\n| | **[vi_length_file_sub_sub](Files/vi__length__file__sub__sub_8s.md#function-vi-length-file-sub-sub)**() |\n\n\n## Functions Documentation\n\n### function vi_length_file_sub_sub\n\n```cpp\nvi_length_file_sub_sub()\n```\n\n\n\n\n## Source code\n\n```cpp\n.proc vi_length_file_sub_sub\n ; add length_file=length_file--\n lda vi_length_file\n bne @out\n dec vi_length_file+1\n\n@out:\n dec vi_length_file\n rts\n.endproc\n```\n\n\n-------------------------------\n\nUpdated on 2022-12-15 at 11:48:27 +0100\n"
},
{
"alpha_fraction": 0.5318725109100342,
"alphanum_fraction": 0.5500853657722473,
"avg_line_length": 17.49473762512207,
"blob_id": "49a942eed786de30fae07c94ce5c7d25fad7f011",
"content_id": "0dbd770a4583934e783f85647f05a1e717e55c1c",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Markdown",
"length_bytes": 3514,
"license_type": "no_license",
"max_line_length": 107,
"num_lines": 190,
"path": "/doxygen/doxybook_output/Files/vi__display__char_8s.md",
"repo_name": "orix-software/orix-software.github.io",
"src_encoding": "UTF-8",
"text": "---\ntitle: /mnt/c/Users/plifp/OneDrive/oric/projets/orix-software/vi/src/functions/subfunc/vi/vi_display_char.s\n\n---\n\n# /mnt/c/Users/plifp/OneDrive/oric/projets/orix-software/vi/src/functions/subfunc/vi/vi_display_char.s\n\n\n\n## Functions\n\n| | Name |\n| -------------- | -------------- |\n| | **[vi_display_char](Files/vi__display__char_8s.md#function-vi-display-char)**() |\n\n\n## Functions Documentation\n\n### function vi_display_char\n\n```cpp\nvi_display_char()\n```\n\n\n\n\n## Source code\n\n```cpp\n.proc vi_display_char\n sta vi_tmp2\n\n ; Avoid to display a line greater than 80 chars ?\n\n lda vi_ptr_file_used\n ldy vi_ptr_file_used+1\n jsr vi_strlen_current_line\n\n cpx #80-1\n bcc @add_char\n rts\n@add_char:\n@not_eol:\n jsr vi_editor_switch_off_cursor\n\n jsr vi_check_eof\n\n cmp #IS_EOF ; Is eof ?\n beq @not_eof\n\n\n jsr vi_shift_file_from_memory_one_char\n ; Scroll now\n ; if posx==VI_EDITOR_MAX_COLUMN then skip scroll\n\n ldy #vi_struct_data::xpos_screen\n lda (vi_struct),y\n cmp #VI_EDITOR_MAX_COLUMN\n bne @scroll\n\n ldy #vi_struct_data::xpos_screen\n lda (vi_struct),y\n tay\n lda vi_tmp2 ; get char\n sta (vi_ptr_screen),y\n\n ldy #$00\n sta (vi_ptr_file_used),y ; Insert into file\n jsr vi_vi_ptr_file_used_plus_plus\n\n jsr vi_set_xpos_0\n\n jsr vi_ypos_screen_plus_plus\n\n ldy #vi_struct_data::ypos_screen\n lda (vi_struct),y\n cmp #VI_LAST_LINE_EDITOR\n beq @scroll_last_line\n tax\n scroll down,,26 ; Because the second empty arg is provided\n\n@scroll_last_line:\n ; FIXME\n\n rts\n\n@scroll:\n jsr vi_scroll_from_left_to_right_full_line\n\n\n\n\n@not_eof:\n ; Check if we need to scroll\n\n jsr vi_check_inserted_char_overflow_the_max_column\n\n cmp #$03\n beq @scroll_screen\n cmp #$01\n beq @L100\n bne @line_not_full\n\n@scroll_screen:\n\n ; jsr vi_ypos_screen_plus_plus\n ; cmp #$01 ; We are on the last line\n ; bne @not_last_line\n ; ; Code for lastline\n\n@not_last_line:\n ldy #vi_struct_data::ypos_screen\n lda (vi_struct),y\n tax\n inx\n\n scroll down, , 26 ; Yes scroll\n\n@L100:\n\n ; scroll the next line\n ldy #vi_struct_data::ypos_screen\n lda (vi_struct),y\n clc\n adc #$01\n pha\n jsr vi_shift_line_left_to_right_editor\n pla\n tay\n lda TABLE_LOW_TEXT,y\n sta vi_ptr1\n lda TABLE_HIGH_TEXT,y\n sta vi_ptr1+1\n\n\n ldy #vi_struct_data::xpos_screen\n lda #VI_EDITOR_MAX_COLUMN+1\n sec\n sbc (vi_struct),y\n\n\n tay\n lda (vi_ptr_file_used),y\n ldy #$00\n sta (vi_ptr1),y\n\n\n\n\n\n@line_not_full:\n ldy #vi_struct_data::xpos_screen\n lda (vi_struct),y\n tay\n lda vi_tmp2 ; get char\n sta (vi_ptr_screen),y\n\n sty vi_tmp1\n\n ldy #$00\n sta (vi_ptr_file_used),y ; Insert into file\n\n jsr vi_vi_ptr_file_used_plus_plus\n jsr vi_ptr_last_char_plus_plus\n jsr vi_length_file_plus_plus\n\n\n; ; end_insert char on screen\n\n\n jsr vi_xpos_screen_plus_plus\n cmp #$01 ; Are we on the end of line ?\n bne @exit\n\n jsr vi_ypos_screen_plus_plus\n\n ; x=0\n lda #$00\n ldy #vi_struct_data::xpos_screen\n sta (vi_struct),y\n@exit:\n rts\n.endproc\n```\n\n\n-------------------------------\n\nUpdated on 2022-12-15 at 11:48:27 +0100\n"
},
{
"alpha_fraction": 0.6039426326751709,
"alphanum_fraction": 0.6290322542190552,
"avg_line_length": 14,
"blob_id": "0ce048079a5cef807b9a4cefa0084b8f4b120f1d",
"content_id": "1265a7d1b5b1a5d0364fd5ea269fbd8d8b4a8e70",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Markdown",
"length_bytes": 558,
"license_type": "no_license",
"max_line_length": 127,
"num_lines": 35,
"path": "/docs/samples/c_samples/putcwd.md",
"repo_name": "orix-software/orix-software.github.io",
"src_encoding": "UTF-8",
"text": "# chdir in cc65\r\n\r\nFor instance chdir, is not manage in cc65, then here is a workaround.\r\n\r\nCreate a file _oputcwd.s. It must contains :\r\n\r\n```ca65\r\n.include \"telestrat.inc\"\r\n\r\nXPUTCWD=$49\r\n\r\n.export _oputcwd\r\n\r\n.importzp tmp1\r\n\r\n.proc _oputcwd\r\n stx tmp1\r\n ldy tmp1\r\n\r\n BRK_TELEMON XPUTCWD\r\n rts\r\n.endproc\r\n```\r\n\r\nAnd now, you C code, you can do (don't forget to add _oputcwd.s to your command line to build _oputcwd.s when you launch cl65):\r\n\r\n```c\r\nextern unsigned char oputcwd();\r\n\r\n\r\nint main() {\r\n oputcwd(\"/etc\");\r\n return 0;\r\n}\r\n```"
},
{
"alpha_fraction": 0.5461047291755676,
"alphanum_fraction": 0.577522337436676,
"avg_line_length": 92.21428680419922,
"blob_id": "ca85cc900cda2d9917f6f5112e6e17c962f4ec83",
"content_id": "87b26cfb3ca811d2ac26d6cb9002dbfb378ed7f8",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Markdown",
"length_bytes": 7830,
"license_type": "no_license",
"max_line_length": 245,
"num_lines": 84,
"path": "/doxygen/doxybook_output/index_files.md",
"repo_name": "orix-software/orix-software.github.io",
"src_encoding": "UTF-8",
"text": "---\ntitle: Files\n\n---\n\n# Files\n\n\n\n\n* **dir [/mnt/c/Users/plifp/OneDrive/oric/projets/orix-software/vi](Files/dir_e433504a3e785b34aabb2c6185efc4a1.md#dir-/mnt/c/users/plifp/onedrive/oric/projets/orix-software/vi)** \n * **dir [/mnt/c/Users/plifp/OneDrive/oric/projets/orix-software/vi/src](Files/dir_6c260d28152e78a3ffcc2e06b7438967.md#dir-/mnt/c/users/plifp/onedrive/oric/projets/orix-software/vi/src)** \n * **dir [/mnt/c/Users/plifp/OneDrive/oric/projets/orix-software/vi/src/commands](Files/dir_e1568de7a9ec0caf269f7729a27efb24.md#dir-/mnt/c/users/plifp/onedrive/oric/projets/orix-software/vi/src/commands)** \n * **file [s](Files/vi_8s.md#file-vi.s)** \n * **dir [/mnt/c/Users/plifp/OneDrive/oric/projets/orix-software/vi/src/data](Files/dir_eb94e028ad508402029845f2921e79f7.md#dir-/mnt/c/users/plifp/onedrive/oric/projets/orix-software/vi/src/data)** \n * **dir [/mnt/c/Users/plifp/OneDrive/oric/projets/orix-software/vi/src/data/vi](Files/dir_834496eb029ed14441e8790c53896f5f.md#dir-/mnt/c/users/plifp/onedrive/oric/projets/orix-software/vi/src/data/vi)** \n * **file [s](Files/strings_8s.md#file-strings.s)** \n * **dir [/mnt/c/Users/plifp/OneDrive/oric/projets/orix-software/vi/src/functions](Files/dir_2288eccfea1af74b995388678c757cc0.md#dir-/mnt/c/users/plifp/onedrive/oric/projets/orix-software/vi/src/functions)** \n * **dir [/mnt/c/Users/plifp/OneDrive/oric/projets/orix-software/vi/src/functions/subfunc](Files/dir_8a0a2fbb0e248d2b08adec17bb698d4e.md#dir-/mnt/c/users/plifp/onedrive/oric/projets/orix-software/vi/src/functions/subfunc)** \n * **dir [/mnt/c/Users/plifp/OneDrive/oric/projets/orix-software/vi/src/functions/subfunc/vi](Files/dir_a5544c2bf0b70f8d417c4d3bfea04409.md#dir-/mnt/c/users/plifp/onedrive/oric/projets/orix-software/vi/src/functions/subfunc/vi)** \n * **file [s](Files/vi__add__char__to__text_8s.md#file-vi-add-char-to-text.s)** \n * **file [s](Files/vi__check__0A_8s.md#file-vi-check-0a.s)** \n * **file [s](Files/vi__check__beginning__of__file_8s.md#file-vi-check-beginning-of-file.s)** \n * **file [s](Files/vi__check__eof_8s.md#file-vi-check-eof.s)** \n * **file [s](Files/vi__check__if__previous__line__was__truncated_8s.md#file-vi-check-if-previous-line-was-truncated.s)** \n * **file [s](Files/vi__check__inserted__char__overflow__the__max__column_8s.md#file-vi-check-inserted-char-overflow-the-max-column.s)** \n * **file [s](Files/vi__clear__command__line_8s.md#file-vi-clear-command-line.s)** \n * **file [s](Files/vi__compute__last__text__line_8s.md#file-vi-compute-last-text-line.s)** \n * **file [s](Files/vi__compute__video__adress_8s.md#file-vi-compute-video-adress.s)** \n * **file [s](Files/vi__copy__arg1__to__name__file__open_8s.md#file-vi-copy-arg1-to-name-file-open.s)** \n * **file [s](Files/vi__decal__text_8s.md#file-vi-decal-text.s)** \n * **file [s](Files/vi__display__char_8s.md#file-vi-display-char.s)** \n * **file [s](Files/vi__display__file__opened_8s.md#file-vi-display-file-opened.s)** \n * **file [s](Files/vi__fill__last__line_8s.md#file-vi-fill-last-line.s)** \n * **file [s](Files/vi__init__vi__struct_8s.md#file-vi-init-vi-struct.s)** \n * **file [s](Files/vi__key__del_8s.md#file-vi-key-del.s)** \n * **file [s](Files/vi__key__down_8s.md#file-vi-key-down.s)** \n * **file [s](Files/vi__key__enter_8s.md#file-vi-key-enter.s)** \n * **file [s](Files/vi__key__left_8s.md#file-vi-key-left.s)** \n * **file [s](Files/vi__key__right_8s.md#file-vi-key-right.s)** \n * **file [s](Files/vi__key__up_8s.md#file-vi-key-up.s)** \n * **file [s](Files/vi__length__file__plus__plus_8s.md#file-vi-length-file-plus-plus.s)** \n * **file [s](Files/vi__length__file__sub__sub_8s.md#file-vi-length-file-sub-sub.s)** \n * **file [s](Files/vi__ptr__file__used__plus__plus_8s.md#file-vi-ptr-file-used-plus-plus.s)** \n * **file [s](Files/vi__ptr__file__used__plus__plus__and__check__eof_8s.md#file-vi-ptr-file-used-plus-plus-and-check-eof.s)** \n * **file [s](Files/vi__ptr__file__used__sub__sub_8s.md#file-vi-ptr-file-used-sub-sub.s)** \n * **file [s](Files/vi__ptr__last__char__add_8s.md#file-vi-ptr-last-char-add.s)** \n * **file [s](Files/vi__ptr__last__char__plus__plus_8s.md#file-vi-ptr-last-char-plus-plus.s)** \n * **file [s](Files/vi__ptr__last__char__sub__sub_8s.md#file-vi-ptr-last-char-sub-sub.s)** \n * **file [s](Files/vi__scroll__from__left__to__right__full__line_8s.md#file-vi-scroll-from-left-to-right-full-line.s)** \n * **file [s](Files/vi__scroll__to__left_8s.md#file-vi-scroll-to-left.s)** \n * **file [s](Files/vi__search__next__line_8s.md#file-vi-search-next-line.s)** \n * **file [s](Files/vi__search__previous__cr_8s.md#file-vi-search-previous-cr.s)** \n * **file [s](Files/vi__search__previous__line__beginning_8s.md#file-vi-search-previous-line-beginning.s)** \n * **file [s](Files/vi__set__ptr__last__char_8s.md#file-vi-set-ptr-last-char.s)** \n * **file [s](Files/vi__set__xpos__0_8s.md#file-vi-set-xpos-0.s)** \n * **file [s](Files/vi__set__xpos__from__A_8s.md#file-vi-set-xpos-from-a.s)** \n * **file [s](Files/vi__shift__file__from__memory__one__char_8s.md#file-vi-shift-file-from-memory-one-char.s)** \n * **file [s](Files/vi__shift__line__left__to__right__editor_8s.md#file-vi-shift-line-left-to-right-editor.s)** \n * **file [s](Files/vi__strlen__current__line_8s.md#file-vi-strlen-current-line.s)** \n * **file [s](Files/vi__vi__ptr__file__used__plus__plus_8s.md#file-vi-vi-ptr-file-used-plus-plus.s)** \n * **file [s](Files/vi__xpos__screen__plus__plus_8s.md#file-vi-xpos-screen-plus-plus.s)** \n * **file [s](Files/vi__xpos__screen__sub__sub_8s.md#file-vi-xpos-screen-sub-sub.s)** \n * **file [s](Files/vi__ypos__screen__plus__plus_8s.md#file-vi-ypos-screen-plus-plus.s)** \n * **file [s](Files/vi__ypos__screen__sub__sub_8s.md#file-vi-ypos-screen-sub-sub.s)** \n * **file [s](Files/__clrscr__vi_8s.md#file--clrscr-vi.s)** \n * **file [s](Files/tables_8s.md#file-tables.s)** \n * **file [s](Files/vi__command__edition_8s.md#file-vi-command-edition.s)** \n * **file [s](Files/vi__displays__info_8s.md#file-vi-displays-info.s)** \n * **file [s](Files/vi__edition__keyboard_8s.md#file-vi-edition-keyboard.s)** \n * **file [s](Files/vi__editor__switch__off__cursor_8s.md#file-vi-editor-switch-off-cursor.s)** \n * **file [s](Files/vi__editor__switch__on__cursor_8s.md#file-vi-editor-switch-on-cursor.s)** \n * **file [s](Files/vi__fill__screen__with__empty__line_8s.md#file-vi-fill-screen-with-empty-line.s)** \n * **file [s](Files/vi__put__char_8s.md#file-vi-put-char.s)** \n * **file [s](Files/vi__set__length__file_8s.md#file-vi-set-length-file.s)** \n * **file [s](Files/vi__struct_8s.md#file-vi-struct.s)** \n * **file [s](Files/vi__switch__to__edition__mode_8s.md#file-vi-switch-to-edition-mode.s)** \n * **file [s](Files/rom_8s.md#file-rom.s)** \n\n\n\n-------------------------------\n\nUpdated on 2022-12-15 at 11:48:27 +0100\n"
},
{
"alpha_fraction": 0.4659533202648163,
"alphanum_fraction": 0.5,
"avg_line_length": 13.685714721679688,
"blob_id": "65298f74fd2806519b77fded372d006bb908307a",
"content_id": "ec90a53ce994615520d96bbbd01cd67f970f79d8",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Markdown",
"length_bytes": 1028,
"license_type": "no_license",
"max_line_length": 73,
"num_lines": 70,
"path": "/docs/tools_docs/vi/Files/vi__check__eof_8s.md",
"repo_name": "orix-software/orix-software.github.io",
"src_encoding": "UTF-8",
"text": "---\ntitle: vi_check_eof.s\n\n---\n\n# vi_check_eof.s\n\n\n\n## Routine\n\n| | Name |\n| -------------- | -------------- |\n| | **[vi_check_eof](Files/vi__check__eof_8s.md#Routine-vi-check-eof)** |\n\n\n## Routine documentation\n\n### Routine vi_check_eof\n\n```ca65\nvi_check_eof\n```\n\n\n\n\n## Source code\n\n```ca65\n.proc vi_check_eof\n ; return in A 00 if eof\n ; returns in A 01 if not eof\n\n ; Do we reached eof ?\n ldy #vi_struct_data::ptr_last_char_file\n lda (vi_struct),y\n sta vi_tmp1\n\n lda vi_ptr_file_used\n cmp vi_tmp1\n bne @not_eof\n\n ldy #vi_struct_data::ptr_last_char_file+1\n lda (vi_struct),y\n sta vi_tmp1\n\n lda vi_ptr_file_used+1\n cmp vi_tmp1\n bne @not_eof\n@is_eof:\n lda #IS_EOF ; EOF\n rts\n\n@is_eof_before:\n jsr vi_ptr_file_used_plus_plus\n jmp @is_eof\n\n@not_eof:\n\n@not_last_line:\n lda #$01 ; Not eof\n rts\n.endproc\n```\n\n\n-------------------------------\n\nUpdated on 2022-12-15 at 14:20:17 +0100\n"
},
{
"alpha_fraction": 0.7310924530029297,
"alphanum_fraction": 0.7394958138465881,
"avg_line_length": 13,
"blob_id": "5f39e176b2994e6a12a4a547e0591a908475bfea",
"content_id": "2414feac13582b5cc3f52f50596c66b25c02750c",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Markdown",
"length_bytes": 238,
"license_type": "no_license",
"max_line_length": 74,
"num_lines": 17,
"path": "/docs/commands/reboot.md",
"repo_name": "orix-software/orix-software.github.io",
"src_encoding": "UTF-8",
"text": "# reboot\n\n## Introduction\n\n reboot machine\n\n## SYNOPSYS\n\n+ reboot\n\n## DESCRIPTION\n\nReboot the machine: it call NMI VECTORS and flush page 2 and page 0\n\n## SOURCE\n\nhttps://github.com/orix-software/shell/blob/master/src/commands/reboot.asm\n"
},
{
"alpha_fraction": 0.537864089012146,
"alphanum_fraction": 0.5566343069076538,
"avg_line_length": 17.614458084106445,
"blob_id": "16ae669dbba71ceacac75c1be0a5b72e54156050",
"content_id": "6902f987ba1cb2fb047c076c588f0d9db1441da4",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Markdown",
"length_bytes": 1545,
"license_type": "no_license",
"max_line_length": 103,
"num_lines": 83,
"path": "/doxygen/doxybook_output/Files/vi__key__left_8s.md",
"repo_name": "orix-software/orix-software.github.io",
"src_encoding": "UTF-8",
"text": "---\ntitle: /mnt/c/Users/plifp/OneDrive/oric/projets/orix-software/vi/src/functions/subfunc/vi/vi_key_left.s\n\n---\n\n# /mnt/c/Users/plifp/OneDrive/oric/projets/orix-software/vi/src/functions/subfunc/vi/vi_key_left.s\n\n\n\n## Functions\n\n| | Name |\n| -------------- | -------------- |\n| | **[vi_key_left](Files/vi__key__left_8s.md#function-vi-key-left)**() |\n\n\n## Functions Documentation\n\n### function vi_key_left\n\n```cpp\nvi_key_left()\n```\n\n\n\n\n## Source code\n\n```cpp\n.proc vi_key_left\n jsr vi_editor_switch_off_cursor\n\n jsr vi_xpos_screen_sub_sub\n cmp #IS_BEGINNING_OF_THE_LINE\n beq @exit_and_check ; x_equal_to_zero\n\n jsr vi_ptr_file_used_sub_sub\n\n@exit:\n rts\n\n@exit_and_check:\n\n ; At this step we are on the column 0 (xpos=0)\n\n jsr vi_check_beginning_of_file\n cmp #$00\n beq @exit\n\n ; Check if the previous char is a crlf\n\n\n ; if posfile[vi_ptr_file_used]=$0A goto @exit_add\n jsr vi_ptr_file_used_sub_sub\n ldy #$00\n lda (vi_ptr_file_used),y\n cmp #LF\n beq @exit_add ; LF exit\n\n ; if posfile[vi_ptr_file_used]=$0D goto @exit_add\n ldy #$00\n lda (vi_ptr_file_used),y\n cmp #CR\n beq @exit_add ; CR ?exit\n\n jsr vi_ypos_screen_sub_sub\n\n ldy #vi_struct_data::xpos_screen\n lda #VI_EDITOR_MAX_COLUMN\n sta (vi_struct),y\n\n rts\n@exit_add:\n jsr vi_ptr_file_used_plus_plus\n rts\n.endproc\n```\n\n\n-------------------------------\n\nUpdated on 2022-12-15 at 11:48:27 +0100\n"
},
{
"alpha_fraction": 0.7466033697128296,
"alphanum_fraction": 0.7654280662536621,
"avg_line_length": 45.280303955078125,
"blob_id": "a2c275d746d666d3244b31a6b85b6887631bfea4",
"content_id": "bb8875699fb6e6b6a13d91fa1abcb6d9c9870564",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Markdown",
"length_bytes": 6168,
"license_type": "no_license",
"max_line_length": 137,
"num_lines": 132,
"path": "/docs/commands/basic11.md",
"repo_name": "orix-software/orix-software.github.io",
"src_encoding": "UTF-8",
"text": "# basic11\n\n## Introduction\n\nStart Atmos rom. You can type basic11 or press FUNCT+B to start.\n\nLoad a personal .tap file\nWhen you starts basic11 commands, the default path is « /home/basic11/ ». Each action on the basic11 mode will be done in\nthis folder (cload/csave). If you cload a tape file, it must be in « /home/basic11 » folder.\n\nYou have downloaded a .tap file, and want to use it. Then, you can create a\nfolder /home/basic11/\nUnder Orix\n/#mkdir home\n/#cd home\n/home#mkdir basic11\n/home#cd basic11\nPut you file in this folder from your PC, and start basic11 (you don’t need to be in the «/home/basic11 » folder to start\nbasic11 with no parameter. By default, basic11 starts in « /home/basic11/ »\nOric.org tape file\nWhen you downloaded sdcard.tgz and unzip it into sdcard or usbkey device, there is many tape file included in this archive.\nYou don’t need to move these type file, if you know the key, you can starts it from commands line. In this case, it will load\nthe correct basic1.1 rom to start the tape file (see below), and the correct joystick configuration if it’s correct.\nOric.org tape file update\nEach week a new software.tgz is generated. You can download it from « repo » and unzip it on the device. It will generate\nlast tape file and last joysticks configuration.\nSearch a tape file from command line\n\nBasic11 has also many.tap files inserted in sdcard.tgz\n8\nTry to find the software with option -l\n/# basic11 -l\nIf you find your software, you can do perform ctrl+c.\nYou can type space to do a pause.\nOn that case, you can launch the tape file like :\n/# basic11 «KEYDISPLAYED\nWhen KEYDISPLAYED is the key displayed in key column. Please note that the key must be in UPPERCASE\nLoad a tap file from command line\nNote that MYFILE must be in UPPERCASE\n/# basic11 «MYFILE\nIf MYFILE is in the oric.org database, it will launch the software with the filename MYFILE.\nIf basic11 command does not find MYFILE in the oric.org database, it will try to load it from /home/basic11/ folder.\nSave your program\nIf you start « basic11 » with no options, basic rom will starts and each csave (or cload) actions will store files in « /home/basic11 »\nfolder\n\n## Start basic11 menu\n\nIf you type « basic11 -g » on command line or FUNCT+G, you will have a\nmenu with all software which have a download link on oric.org (only atmos version and when a tape file is available).\n/#basic11 -g\nYou can use left and right letters to change to a new letter. If the letter is empty, it means that there is no available tap file\nfor this letter.\nYou can use up and down link to navigate into software. If you press enter, the software will starts.\nNote that not all games are working yet. Some times, chars are corrupted. If the joysticks does not works, there is two case :\n• the game does not call rom routine to manage keyboard\n• keyboard mapping is not done yet\nYou can use arrows to navigate into the menu :\n• up and down to select the software\n• right and left to switch to the menu letters\nSome letters are empty. It means that there is no software with tape file available on oric.org for this letter\nQuit basic11\nIf you want to quit basic11 from interpreter command line, you can type « QUIT ». This will force to reboot to Orix (you can\nalso use reset button)\n\n## How the .tap file starts\nIf you only type « basic11 », this will start bank 6 (normal basic rom). The default folder in that case is «/home/basic11 »\nIf you type « basic11 » with a tape file as an argument, there is 2 cases\n1. The tape file (key) is already known in oric.org website, then basic11 try to find it in its databank file (/var/cache/basic11/\nfolder). If the key is found, it will start the tape file located in «/usr/share/basic11/... »\n2. If the key is unknown, it will try to find it in «/home/basic11 »\nIf the tap file is in the oric.org db file, basic11 will load the software configuration from the db software file (as joystick\nconfiguration, and the id of the rom). Basic11 load the right rom into ram bank, override the default basic11 path to the tape\nfile folder (« usr/share/basic11/[firstletter software].\nIt means that if you load this kind of software and you can quit the software, each file action in basic11 rom, will be performed\nin « usr/share/basic11/[firstletter software]. »\nNot working tapes (for instance)\n• All Oric-1 games can be started with FUNCT+L in ROM menu : start oric-1 (depending of your device), and put .tap\nfiles in /home/basic10\n• Software which does not work (25), but the number can be reduced in future release.\ncobra Cobra pinball Damsel in distress\nRush hour 4K\nLe diamant de l’ile maudite Durendal HU*BERT\nHunchback Schtroumpfs Stanley (ROM 0,1 tested)\nThem Titan Visif\nXenon III Dig Dog Elektro Storm\nKilburn Encounter Le tresor du pirate L’aigle d’or (ROM 0,1 tested)\nCompatible (micropuce) Volcanic demo Clavidact\nDAO Cobra Soft CW-Morse The Hellion\nMARC Caspak Kryllis : when we lost one life, the game does not restar\n\n# Tape with altered charset\n\nFire flash Scuba Dive 3D fongus (i,f letters)\nJoysticks issues\nWe did keyboard/joystick mapping for a lot of games, but we did not set the keyboard mapping for all software. If you want\nto help us, contact us.\nSome game does not work because they handle their own keyboard routine. It could be handle with hardware tricks but, it’s\nnot done.\nSome others games uses special keys (SHIFT, CTRL) for direction or the first button. Theses cases are not handle yet : but it\ncould in the future.\n\n\n## SYNOPSYS\n\n+ basic11\n+ basic11 -g\n+ basic11 -l\n+ basic11 \"MYTAPE\n\n## DESCRIPTION\n\nThis command starts the atmos rom. This rom did not test RAM and cload/csave are done on sdcard. It means that it calls file from sdcard.\n\nCload works with .tap file. Multitap files works too.\n\nGet a tape file, and place it in the root folder of the sdcard.\n\nStarts basic11 :\n/#basic11\nor\n/#basic11 \"DEFENDER\"\n\nCLOAD\"ZORGONS => it will load zorgons.tap\n\n## Working software\n\n+ Some games are not working because the rom in order to have the software working is release yet\n\n## SOURCE\n\nhttps://github.com/orix-software/shell/blob/master/src/commands/basic11.asm\n"
},
{
"alpha_fraction": 0.6766743659973145,
"alphanum_fraction": 0.6974595785140991,
"avg_line_length": 15.037036895751953,
"blob_id": "e23fca94b03c778d3dcadf1ee0ee7ccfee43df85",
"content_id": "4d21c8c6506dcdca2053f868f4c129be22aa6789",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Markdown",
"length_bytes": 433,
"license_type": "no_license",
"max_line_length": 110,
"num_lines": 27,
"path": "/docs/kernel/primitives/xdecay.md",
"repo_name": "orix-software/orix-software.github.io",
"src_encoding": "UTF-8",
"text": "# XDECAY\n\n## Usage\n\nConvert a string into number\n\nA et Y contains the ptr of the string\n\nXDECAY returns the value (16 bits) in AY and X contains the length of the decimal number provided in the input\n\n## Example\n\n```ca65\n\n .include \"telestrat.inc\"\n\n lda #<mystring\n ldy #>mystring\n BRK_TELEMON XDECAY\n\n ; A and Y contains the 16 bits value\n ; X contains the length of chars in mystring\n rts\n\n mystring:\n .asciiz \"125\"\n```\n"
},
{
"alpha_fraction": 0.5048409700393677,
"alphanum_fraction": 0.5380359888076782,
"avg_line_length": 14.717391014099121,
"blob_id": "328503e30f0d2830c48bc3d2b77c40c7d638d2e6",
"content_id": "aaded8f041b08495b97b917dd4750d0a2a029c1b",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Markdown",
"length_bytes": 723,
"license_type": "no_license",
"max_line_length": 128,
"num_lines": 46,
"path": "/doxygen/doxybook_output_vi/Files/vi__vi__ptr__file__used__plus__plus_8s.md",
"repo_name": "orix-software/orix-software.github.io",
"src_encoding": "UTF-8",
"text": "---\ntitle: vi_vi_ptr_file_used_plus_plus.s\n\n---\n\n# vi_vi_ptr_file_used_plus_plus.s\n\n\n\n## Routine\n\n| | Name |\n| -------------- | -------------- |\n| | **[vi_vi_ptr_file_used_plus_plus](Files/vi__vi__ptr__file__used__plus__plus_8s.md#Routine-vi-vi-ptr-file-used-plus-plus)** |\n\n\n## Routine documentation\n\n### Routine vi_vi_ptr_file_used_plus_plus\n\n```ca65\nvi_vi_ptr_file_used_plus_plus\n```\n\n\n\n\n## Source code\n\n```ca65\n.proc vi_vi_ptr_file_used_plus_plus\n ; A the char to add\n ; Insert into file (memory) char\n inc vi_ptr_file_used\n bne @no_inc\n inc vi_ptr_file_used+1\n@no_inc:\n\n rts\n.endproc\n```\n\n\n-------------------------------\n\nUpdated on 2022-12-15 at 14:20:17 +0100\n"
},
{
"alpha_fraction": 0.5521235466003418,
"alphanum_fraction": 0.5733590722084045,
"avg_line_length": 10.949999809265137,
"blob_id": "f84afe7a1512756af98e2a0923435a9709d37526",
"content_id": "8e3d55a124e8d219fd7447e67bb423822236cec5",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Markdown",
"length_bytes": 518,
"license_type": "no_license",
"max_line_length": 64,
"num_lines": 40,
"path": "/docs/developer_manual/orixsdk_macros/tohex.md",
"repo_name": "orix-software/orix-software.github.io",
"src_encoding": "UTF-8",
"text": "# tohex macro\r\n\r\n## Description\r\n\r\nconvert a 8 bit number hexadecimal\r\n\r\n## Usage\r\n\r\ntohex ptr\r\n\r\nptr may be: address\r\n\r\n## Output\r\n\r\nA and Y contains the ascii hexa value\r\n\r\n## Example 1\r\n\r\n```ca65\r\n .include \"telestrat.inc\"\r\n .include \"SDK_misc.mac\"\r\n\r\n tohex mynumber\r\n rts\r\nmynumber:\r\n .byte 12\r\n```\r\n\r\n## Example 2\r\n\r\n```ca65\r\n .include \"telestrat.inc\"\r\n .include \"SDK_misc.mac\"\r\n\r\n lda #12\r\n tohex\r\n rts\r\n```\r\n\r\nCall [XHEXA](../../../kernel/primitives/xhexa/) kernel function.\r\n"
},
{
"alpha_fraction": 0.7196652889251709,
"alphanum_fraction": 0.7196652889251709,
"avg_line_length": 10.380952835083008,
"blob_id": "452b8bbd1a1f3fcc72db9cbe2971341c0a8b2878",
"content_id": "8914a3e3f0b1619889bb73d0ec19309dbd91a80f",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Markdown",
"length_bytes": 239,
"license_type": "no_license",
"max_line_length": 75,
"num_lines": 21,
"path": "/docs/commands/viewhrs.md",
"repo_name": "orix-software/orix-software.github.io",
"src_encoding": "UTF-8",
"text": "# viewhrs\n\n## Introduction\n\nDisplays a hrs file\n\n## SYNOPSYS\n\n+ /#viewhrs toto.hrs\n\n## DESCRIPTION\n\nDisplays a hrs file\n\n## EXAMPLES\n\n+ viewhrs me.hrs\n\n## SOURCE\n\nhttps://github.com/orix-software/shell/blob/master/src/commands/viewhrs.asm\n"
},
{
"alpha_fraction": 0.7381974458694458,
"alphanum_fraction": 0.7381974458694458,
"avg_line_length": 12.70588207244873,
"blob_id": "ea4d618b047cd1c991a0b15b02ea8291d254cbd1",
"content_id": "6a0b252372ad9c96e4b787269cfe56d546fab5ac",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Markdown",
"length_bytes": 233,
"license_type": "no_license",
"max_line_length": 74,
"num_lines": 17,
"path": "/docs/commands/otimer.md",
"repo_name": "orix-software/orix-software.github.io",
"src_encoding": "UTF-8",
"text": "# otimer\n\n## Introduction\n\nDisplay timer since the Oric is power on\n\n## SYNOPSYS\n\n+ otimer\n\n## NOTES\n\nEach time Oric reboots, this timer is reset\n\n## SOURCE\n\nhttps://github.com/orix-software/shell/blob/master/src/commands/otimer.asm\n"
},
{
"alpha_fraction": 0.5614906549453735,
"alphanum_fraction": 0.5863354206085205,
"avg_line_length": 17.720930099487305,
"blob_id": "6fbfa0ccf42cbade60748c54e5bb8588770bdeed",
"content_id": "00d4bf2cbcc11fa29c476d42f9d0ee3ddd937cff",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Markdown",
"length_bytes": 805,
"license_type": "no_license",
"max_line_length": 121,
"num_lines": 43,
"path": "/doxygen/doxybook_output/Files/vi__ptr__file__used__plus__plus_8s.md",
"repo_name": "orix-software/orix-software.github.io",
"src_encoding": "UTF-8",
"text": "---\ntitle: /mnt/c/Users/plifp/OneDrive/oric/projets/orix-software/vi/src/functions/subfunc/vi/vi_ptr_file_used_plus_plus.s\n\n---\n\n# /mnt/c/Users/plifp/OneDrive/oric/projets/orix-software/vi/src/functions/subfunc/vi/vi_ptr_file_used_plus_plus.s\n\n\n\n## Functions\n\n| | Name |\n| -------------- | -------------- |\n| | **[vi_ptr_file_used_plus_plus](Files/vi__ptr__file__used__plus__plus_8s.md#function-vi-ptr-file-used-plus-plus)**() |\n\n\n## Functions Documentation\n\n### function vi_ptr_file_used_plus_plus\n\n```cpp\nvi_ptr_file_used_plus_plus()\n```\n\n\n\n\n## Source code\n\n```cpp\n.proc vi_ptr_file_used_plus_plus\n inc vi_ptr_file_used\n bne @out\n inc vi_ptr_file_used+1\n@out:\n rts\n.endproc\n```\n\n\n-------------------------------\n\nUpdated on 2022-12-15 at 11:48:27 +0100\n"
},
{
"alpha_fraction": 0.5989159941673279,
"alphanum_fraction": 0.6151761412620544,
"avg_line_length": 19.705883026123047,
"blob_id": "b2e1830f86f8e7d00ada040567e8d24ff29d1abf",
"content_id": "0a6fd8bbe4f94e7429a9f22c7402cc6bf59d385f",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Markdown",
"length_bytes": 369,
"license_type": "no_license",
"max_line_length": 98,
"num_lines": 17,
"path": "/docs/kernel/primitives/xscroh.md",
"repo_name": "orix-software/orix-software.github.io",
"src_encoding": "UTF-8",
"text": "# XSCROH (Scrollup text mode)\r\n\r\n## Descroption\r\n\r\nScroll all lines from bottom to the top\r\n\r\n## Examples\r\n\r\n```ca65\r\n .include \"telestrat.inc\"\r\n ldx #$01 ; First line to scroll\r\n ldy #26 ; Last line to scroll\r\n BRK_TELEMON XSCROH\r\n rts\r\n```\r\n\r\n!!! tip \"See [scroll](../../developer_manual/orixsdk_macros/scroll) macro from orix-sdk to use it\"\r\n"
},
{
"alpha_fraction": 0.6296296119689941,
"alphanum_fraction": 0.6410256624221802,
"avg_line_length": 27.25,
"blob_id": "dcb18b0bd94318915985aeb7d839009c68f4efec",
"content_id": "60c9e5867c34039497847a0e938496298e9fcdc9",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Markdown",
"length_bytes": 351,
"license_type": "no_license",
"max_line_length": 218,
"num_lines": 12,
"path": "/docs/kernel/primitives/xwr0.md",
"repo_name": "orix-software/orix-software.github.io",
"src_encoding": "UTF-8",
"text": "# XWR0\r\n\r\nPrint a char to the screen\r\n\r\n```ca65\r\n .include \"telestrat.inc\"\r\n lda #'A'\r\n BRK_TELEMON XWR0\r\n rts\r\n```\r\n\r\n!!! tip \"See [print](../../developer_manual/orixsdk_macros/print/) macro from orix-sdk to use it easily (with print #VALUE) or [cputc](../../developer_manual/orixsdk_macros/cputc/) macro from orix-sdk to use it easily\"\r\n"
},
{
"alpha_fraction": 0.5791139006614685,
"alphanum_fraction": 0.5854430198669434,
"avg_line_length": 11.739130020141602,
"blob_id": "6986532d5b63ac15170fc83a512dc31949015951",
"content_id": "6b88170cf18669743145ffe80dcf7eba9820ae2a",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Markdown",
"length_bytes": 316,
"license_type": "no_license",
"max_line_length": 60,
"num_lines": 23,
"path": "/docs/developer_manual/orixsdk_macros/unlink.md",
"repo_name": "orix-software/orix-software.github.io",
"src_encoding": "UTF-8",
"text": "# UNLINK macro\r\n\r\n## Description\r\n\r\nRemove a file\r\n\r\n## Usage\r\n\r\nunlink address\r\n\r\n## Example\r\n\r\n```ca65\r\n .include \"telestrat.inc\"\r\n .include \"../orix-sdk/macros/SDK_file.mac\"\r\n\r\n unlink myfile\r\n rts\r\nmyfile:\r\n .asciiz \"toto.txt\"\r\n```\r\n\r\nCall [XRM](../../../kernel/primitives/xrm/) kernel function.\r\n"
},
{
"alpha_fraction": 0.7673956155776978,
"alphanum_fraction": 0.7673956155776978,
"avg_line_length": 81.83333587646484,
"blob_id": "12bff226cca21981621bba729591619b08cbcb69",
"content_id": "aa65b1dcc06e2a8dbf58c907252dc77024794c0d",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Markdown",
"length_bytes": 503,
"license_type": "no_license",
"max_line_length": 310,
"num_lines": 6,
"path": "/docs/developer_manual/oricutronvsreal.md",
"repo_name": "orix-software/orix-software.github.io",
"src_encoding": "UTF-8",
"text": "# Oricutron VS real hardware\r\n\r\n## Differencies and issues\r\n\r\n* usb controller is not fully emulated, it means that a lot of commands does not work on the emulation (mainly low usb command)\r\n* uppercase and lower case for filenames are not managed in oricutron. It means that a code could work on Oricutron but it won't work on real. Every access to sdcard or usbdrive with FAT fileformat must be done with filename and path in **uppercase** (eg : toto.txt must be sent to the controler like TOTO.TXT)\r\n"
},
{
"alpha_fraction": 0.5075681209564209,
"alphanum_fraction": 0.5479313731193542,
"avg_line_length": 14.983870506286621,
"blob_id": "97805637ca74545956b67e7e45b0fa01f06288b8",
"content_id": "d848f15087871099da9ddf4281badede1f895bbf",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Markdown",
"length_bytes": 991,
"license_type": "no_license",
"max_line_length": 158,
"num_lines": 62,
"path": "/doxygen/doxybook_output_vi/Files/vi__check__if__previous__line__was__truncated_8s.md",
"repo_name": "orix-software/orix-software.github.io",
"src_encoding": "UTF-8",
"text": "---\ntitle: vi_check_if_previous_line_was_truncated.s\n\n---\n\n# vi_check_if_previous_line_was_truncated.s\n\n\n\n## Routine\n\n| | Name |\n| -------------- | -------------- |\n| | **[vi_check_if_previous_line_was_truncated](Files/vi__check__if__previous__line__was__truncated_8s.md#Routine-vi-check-if-previous-line-was-truncated)** |\n\n\n## Routine documentation\n\n### Routine vi_check_if_previous_line_was_truncated\n\n```ca65\nvi_check_if_previous_line_was_truncated\n```\n\n\n\n\n## Source code\n\n```ca65\n\n.proc vi_check_if_previous_line_was_truncated\n\n\n lda vi_ptr_file_used\n sta vi_ptr1\n\n lda vi_ptr_file_used+1\n sta vi_ptr1+1\n\n ;vi_dec16_zp vi_ptr1\n\n lda vi_ptr1\n bne @S1\n dec vi_ptr1+1\n@S1:\n dec vi_ptr1\n\n ldy #$00\n lda (vi_ptr1),y\n cmp #$0A\n beq @exit\n jsr vi_ptr_file_used_plus_plus\n@exit:\n rts\n.endproc\n```\n\n\n-------------------------------\n\nUpdated on 2022-12-15 at 14:20:17 +0100\n"
},
{
"alpha_fraction": 0.49147287011146545,
"alphanum_fraction": 0.5333333611488342,
"avg_line_length": 13.02173900604248,
"blob_id": "3f4ef2293b892346c1e83d8680fbf897fcbba7e4",
"content_id": "2ec00ca3932b95be55a98e64aa018d551d88fba0",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Markdown",
"length_bytes": 645,
"license_type": "no_license",
"max_line_length": 111,
"num_lines": 46,
"path": "/docs/tools_docs/vi/Files/vi__length__file__plus__plus_8s.md",
"repo_name": "orix-software/orix-software.github.io",
"src_encoding": "UTF-8",
"text": "---\ntitle: vi_length_file_plus_plus.s\n\n---\n\n# vi_length_file_plus_plus.s\n\n\n\n## Routine\n\n| | Name |\n| -------------- | -------------- |\n| | **[vi_length_file_plus_plus](Files/vi__length__file__plus__plus_8s.md#Routine-vi-length-file-plus-plus)** |\n\n\n## Routine documentation\n\n### Routine vi_length_file_plus_plus\n\n```ca65\nvi_length_file_plus_plus\n```\n\n\n\n\n## Source code\n\n```ca65\n.proc vi_length_file_plus_plus\n ; add length_file=length_file+1\n inc vi_length_file\n bne @S1\n inc vi_length_file+1\n\n@S1:\n rts\n\n.endproc\n```\n\n\n-------------------------------\n\nUpdated on 2022-12-15 at 14:20:17 +0100\n"
},
{
"alpha_fraction": 0.5,
"alphanum_fraction": 0.6010100841522217,
"avg_line_length": 16.217391967773438,
"blob_id": "31d02047def5e33eed5bc208e40fea4bbfdda80b",
"content_id": "f8e157ac1b64b51eabf23ee10378618c2947babc",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Markdown",
"length_bytes": 396,
"license_type": "no_license",
"max_line_length": 124,
"num_lines": 23,
"path": "/doxygen/doxybook_output_vi/Files/dir_e433504a3e785b34aabb2c6185efc4a1.md",
"repo_name": "orix-software/orix-software.github.io",
"src_encoding": "UTF-8",
"text": "---\ntitle: /mnt/c/Users/plifp/OneDrive/oric/projets/orix-software/vi\n\n---\n\n# /mnt/c/Users/plifp/OneDrive/oric/projets/orix-software/vi\n\n\n\n## Directories\n\n| Name |\n| -------------- |\n| **[](Files/dir_6c260d28152e78a3ffcc2e06b7438967.md#dir-/mnt/c/users/plifp/onedrive/oric/projets/orix-software/vi/src)** |\n\n\n\n\n\n\n-------------------------------\n\nUpdated on 2022-12-15 at 14:20:17 +0100\n"
},
{
"alpha_fraction": 0.4522612988948822,
"alphanum_fraction": 0.5427135825157166,
"avg_line_length": 10.70588207244873,
"blob_id": "be8881b025f92b934da7ec5652e9ca2d31e2eab2",
"content_id": "356c1693bb82cc0f22e75bbcbb68943363388709",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Markdown",
"length_bytes": 199,
"license_type": "no_license",
"max_line_length": 81,
"num_lines": 17,
"path": "/docs/tools_docs/vi/index_examples.md",
"repo_name": "orix-software/orix-software.github.io",
"src_encoding": "UTF-8",
"text": "---\ntitle: Examples\n\n---\n\n# Examples\n\n\n\n\n* **example [This](Examples/This-example.md#example-this)** <br>strlen of a line \n\n\n\n-------------------------------\n\nUpdated on 2022-12-15 at 14:20:18 +0100\n"
},
{
"alpha_fraction": 0.5985699892044067,
"alphanum_fraction": 0.6064010858535767,
"avg_line_length": 48.779659271240234,
"blob_id": "331e9511ce162df9ce66682e3970ab1ee7a79169",
"content_id": "b086b0b79cfb60a0d6a9a534f4ab653789500004",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Markdown",
"length_bytes": 2937,
"license_type": "no_license",
"max_line_length": 219,
"num_lines": 59,
"path": "/docs/index.md",
"repo_name": "orix-software/orix-software.github.io",
"src_encoding": "UTF-8",
"text": "# Definitions\n\n## Start atmos/oric-1 tape file, ftdos .dsk files, roms\n\n### .tap file\n\n| Feature | read | Write | joystick management\n| ----------- | ---------------------------- |-------|---------------------|\n| `Atmos .tap file` | :material-check: |:material-check:|:material-check:|\n| `Oric-1 .tap file` | :material-check: |:material-check:|:material-check:|\n\n### .dsk file\n\n| Feature | read | Write | joystick management\n| ----------- | ---------------------------- |-------|---------------------|\n| `ftdos .dsk file` | :material-check: |:material-close: |:material-close: |\n\n## Twilighte board\n\nThe Twilighte board is an extra hardware connected on the the oric (Atmos or Oric-1), it improves default Oric with hardware feature as eeprom (in system update), RAM, joysticks, usb controler.\n\nIt handles 32 Banks of 16KB of eeprom and 32 Banks of 16KB for RAM. The architecture of hardware registers, joystick management has compatibility with Telestrat in order to have Oric working on Telestrat or Atmos.\n\n| Feature | Availability |\n| ----------- | ------------------------------------ |\n| `Sdcard` | :material-check: |\n| `Usbdrive storage` | :material-check: |\n| `Long filename` | :material-check: |\n| `Every usb device control` | :material-check: |\n| `2 Joysticks` | :material-check: |\n| `Joysticks works at independently` | :material-check: |\n| `512KB RAM memory` | :material-check: |\n| `512KB eeprom memory` | :material-check: |\n| `Eeprom in system update` | :material-check: |\n\n## Orix\n\n{ align=left }\n\nOrix is the default (D)OS of the board when it plugged into the oric. Orix is a linux/unix style OS. It's the main OS which can help to start every others systems as oric-1 ROM, atmos ROM etc\n\nOrix must have at least 2 banks to boot : Kernel and shell.\n\nKernel is a bank inserted in the 7th slot and it's the first start to boot. Shell is the 5th bank and contains sh binary.\n\nWhen system starts, kernel forks \"sh\" commands at the end of the kernel initialisation. Shell is available and can starts any commands.\n\nThere are 2 kind of rom :\n\n* Standalone ROM : it does not need to call kernel primitive, and manage all the main memory (for example : atmos ROM)\n\n* Orix Roms : in that case, rom does not manage the main memory, and calls kernel to do tasks (for example : Shell roms).\n\nIn Orix roms, the rom declares commands to an offset in the bank and can be accessed from command line. If any command are typed from prompt, kernel will launch \"XEXEC\" primitive to find in any rom where the command is.\n\n| Feature | Availability |\n| ----------- | ------------------------------------ |\n| `Multitasking` | :material-close: |\n| `Long filename management` | :material-close: |\n"
},
{
"alpha_fraction": 0.6973865032196045,
"alphanum_fraction": 0.711829423904419,
"avg_line_length": 25.884614944458008,
"blob_id": "b8055b32021a3e136a9686ecd1162ed56f54288f",
"content_id": "c8e89f1d9c346b4e9410e4881740ea42c134f86d",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Markdown",
"length_bytes": 1491,
"license_type": "no_license",
"max_line_length": 122,
"num_lines": 52,
"path": "/docs/tutorials/network.md",
"repo_name": "orix-software/orix-software.github.io",
"src_encoding": "UTF-8",
"text": "# Gestion ethernet beta (WIP) : nécessite une carte à insérer dans la twilighte board\r\n\r\nATTENTION : c'est une version en cours de dev, il y a beaucoup de bugs.\r\n\r\n* Démarrer l'oric\r\n\r\n* s'assurer que le cable réseau est branché sur la carte réseau et que le cable est bien sur un réseau disposant d'un dhcp\r\n\r\n* Presser FUNCT+L (pour passer un bug) quitter (ESC) pui presser FUNCT+N et attendre le démarrage de la couche réseau.\r\n\r\n3 [OK] en vert devrait apparaitre, si ce n'est pas le cas, se référer à l'erreur concernée (cable débranché, dhcp ...)\r\n\r\n* Quand tout est vert, regarder l'ip récupérée au dhcp :\r\n\r\n/#ifcfg\r\n\r\nL'adresse IP est présente et la mac address de la carte est présente aussi.\r\n\r\n* Vérifier les commandes qu'apporte ce démarrage réseau (banque network numéro 34)\r\n\r\n/#help -b34\r\n\r\n* Vérifier le serveur DNS configuré\r\n\r\n/#resvctl\r\n\r\n* Faire une résolution dns d'oric.org puis de www.google.com\r\n\r\n/#dig www.oric.org\r\n\r\npuis\r\n\r\n/#dig www.google.com\r\n\r\n* Envoyer un message sur un serveur rsyslog\r\n\r\n=> nécessite d'avoir un serveur rsyslog sur le réseau acceptant les messages en UDP (à configurer sur le serveur distant)\r\n\r\n/#nc -u 192.168.1.200 514 -s MONMSG\r\n\r\n* Telecharger un fichier de 5 mo et l'écrire sur disque\r\n\r\n/# curl\r\n\r\n=> attendre 3 Mins, le programme rend la main.\r\n\r\n/# ls -l\r\n\r\nun fichier index.htm de 5mo est présent\r\n\r\n/# v\r\n=> fait appelle un serveur web, et récupère un index.hlp qui s'affiche à l'écran\r\n\r\n\r\n"
},
{
"alpha_fraction": 0.7514367699623108,
"alphanum_fraction": 0.7816091775894165,
"avg_line_length": 17.3157901763916,
"blob_id": "939289b797c6f17a7106bd1ff376bc1114dbcb54",
"content_id": "a1778b5fb9dfebcfafb46bfbccd23ecb82990c39",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Markdown",
"length_bytes": 696,
"license_type": "no_license",
"max_line_length": 75,
"num_lines": 38,
"path": "/docs/commands/setfont.md",
"repo_name": "orix-software/orix-software.github.io",
"src_encoding": "UTF-8",
"text": "# setfont\n\n## Introduction\n\nChange the default font\n\n## SYNOPSYS\n\n+ setfont fontname\n\n## DESCRIPTION\n\nThis command overwrite the current charset with a new one\n\n## EXAMPLES\n\n+ setfont boldfr0\n+ setfont default\n\n## Lists of fonts\n\n* BLACKFR0 BOLDFR0 CLEARUK0 CURSIVFR\n* DIGITAFR FANTA1UK FANTA3UK GREEKUK0\n* NAROW2FR ORICFR0 SMLCAPUK SQUBOLUK\n* BLACKUK0 BOLDUK0 CURSI2FR CURSIVUK\n* DIGITAUK FANTA2UK GOTHIQFR MIROIRUK\n* NAROW2UK ORICUK0 SQUAREUK SYMBOLUK\n* BOLD2FR0 CLEARFR0 CURSI2UK DEFAULT\n* ENVERSUK FANTA3FR GOTHIQUK MODERNFR\n* NARROWUK SLANTUK0 SQUBOLFR\n\n## NOTES\n\nFont files are in /usr/share/fonts\n\n## SOURCE\n\nhttps://github.com/orix-software/shell/blob/master/src/commands/setfont.asm\n"
},
{
"alpha_fraction": 0.6849656701087952,
"alphanum_fraction": 0.699937641620636,
"avg_line_length": 18.81818199157715,
"blob_id": "39a52009eb3ae65d9aa8c451052d7fc72a987256",
"content_id": "d0270460edfe60dd839b73f3a27cafed1624ff83",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Markdown",
"length_bytes": 1625,
"license_type": "no_license",
"max_line_length": 93,
"num_lines": 77,
"path": "/docs/tutorials/start_a_software_fr.md",
"repo_name": "orix-software/orix-software.github.io",
"src_encoding": "UTF-8",
"text": "# Démarrer un software\r\n\r\n## Démarrer un soft oric-1 ou atmos (Loader v2022.3)\r\n\r\nExemple d'un jeu atmos (A.T.M)\r\n\r\n* Démarrer l'oric\r\n\r\n* Au shell, appuyer sur funct+L : un menu apparait.\r\n\r\n* Aller sur l'icone joyspad.\r\n\r\n* Appuyer sur la touche A -> Nous arrivons sur le premier jeu commençant par la lettre A\r\n\r\n* Appuyer sur espace : la fiche de jeu s'affiche.\r\n\r\n* Utiliser la flèche du bas pour descendre et accéder par exemple aux commentaires.\r\n\r\n* Pour revenir en haut de la fiche, appuyer sur la touche haut.\r\n\r\n* Quitter la fiche jeu, appuyer sur espace.\r\n\r\n* Appuyer sur entrée pour démarrer ATM.\r\n\r\n* Pour quitter le jeu, appuyer sur le bouton reset.\r\n\r\nLe loader permet de lancer les soft oric-1 et atmos, mais pas seulement à l'avenir\r\n\r\n## Demarrer un soft oric-1 only\r\n\r\n* Démarrer l'oric\r\n\r\n* taper basic10 -g\r\n\r\n* naviguer avec les flèches et choisir le soft à lancer avec les flèches : Appuyer sur entrée\r\n\r\n## Demarrer un soft Atmos only\r\n\r\n* Démarrer l'oric\r\n\r\n* taper basic11 -g\r\n\r\n* naviguer avec les flèches et choisir le soft à lancer avec les flèches : Appuyer sur entrée\r\n\r\n## Demarrer un soft personnel sur atmos\r\n\r\n* Sur le shell, taper :\r\n\r\ncd /home/basic11\r\nls\r\n\r\nMemoriser le nom d'un .tap dans la liste\r\n\r\nPuis taper au shell basic11\r\n\r\n* basic11\r\n\r\n* Puis faire un cload : CLOAD\"MONTAP\r\n\r\n## Démarrer un .dsk jasmin sans lecteur jasmin (beta)\r\n\r\nRepérer un dsk dans /home/basic11/dsk\r\n\r\n/#cd home/basic11/dsk\r\n/#ls\r\n\r\nPrendre par exemple le dsk theoric9.dsk\r\n\r\nLancer ftdos avec ce dsk :\r\n\r\nftdos theoric9\r\n\r\nLe dsk démarre.\r\n\r\nRegarder le contenu du dsk :\r\n\r\n!CAT\"\r\n"
},
{
"alpha_fraction": 0.670769214630127,
"alphanum_fraction": 0.7218461632728577,
"avg_line_length": 34.494380950927734,
"blob_id": "97e76f18b3a944d9d54bd797af769fcb3cc8fd80",
"content_id": "5b786dd4c8e579ee3ebc520616dcbfdf9ab4652f",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Markdown",
"length_bytes": 3250,
"license_type": "no_license",
"max_line_length": 274,
"num_lines": 89,
"path": "/docs/developer_manual/memory.md",
"repo_name": "orix-software/orix-software.github.io",
"src_encoding": "UTF-8",
"text": "# Memory management\r\n\r\n## Hardware informations\r\n\r\nTwilighte board handles 512KB of RAM and 512KB of eeprom\r\n\r\n## Orix Roms\r\n\r\nKernel has [XMALLOC](../../kernel/primitives/xmalloc) and [XFREE](../../kernel/primitives/xfree) primitives to allocate memory dynamictly. [XMALLOC](../../kernel/primitives/xmalloc) primitives only returns pointer from main memory.\r\n\r\nKernel can't instanciate extra memory from bank for instance.\r\n\r\n## Standalone ROM\r\n\r\nExtra memory can be accessed without kernel primitive with simple code\r\n\r\nThe banking management is the same as Telestrat in order to have Orix working on Telestrat too, because it keeps telestrat compatibility (7 banks available + 1 bank for Overlay RAM), that is why twilighte board adds others registers to keep telestrat compatibility\r\n\r\nThere is a register which contains a set of 4 banks. This register accept values from 0 to 7 : $343. Each value set another set of 4 banks. For example, if $343 contains 2, it will displays others parts of eeprom or SRAM when bit b0, b1, b2 in $321 register contains 4,3,2,1\r\n\r\n## Banking management explanation\r\n\r\nSwitching to another bank should not be done, but the behavior of the card is explained below.\r\n\r\nFor example, a ROM can be loaded into eeprom or ram with [orixcfg](https://orix-software.github.io/commands/orixcfg/orixcfg) Some others ROMS (as systemd, or others standalone rom) are loaded on the fly with loader menu.\r\n\r\nIf it needs more ram than the main memory, extended driver is not coded, but it could on the future.\r\n\r\nKernel is always reached when $321&7 is equal to 7 . $343 register does not change the behavior of the value set in $321, when $321&7=7 $321&6=6 and $321&5=5. When it's below 5, banks are swapped when register value of $343 change\r\n\r\nTo summarize :\r\n\r\n* bank 7 : kernel\r\n* bank 6 : basic11 (modified to work with sdcard or usb key)\r\n* bank 5 : shell\r\n\r\nTheses banks can't be switch and are always shown but for others banks (4,3,2,1), you have (depending the value of $343)\r\n\r\n* bank 4: what you want\r\n* bank 3: what you want\r\n* bank 2: what you want\r\n* bank 1: what you want\r\n\r\nTheses banks can be switches to others set banking RAM or ROM with a flag.\r\n\r\nYou should not use this code, because it's not necessary to use theses routines when you do an orix bank (Kernel provides routines, or facility to call binaries in banks)\r\n\r\nIn the case you need to switch to another bank, you can do this : \r\nIf you want to switch to bank 4 (hardware) you have to do in main ram, but there is also vectors) :\r\n\r\n```ca65\r\nsei\r\nlda $321\r\nand #%11111000 ; Do mask for b0,b1, b2 of banking register\r\nora #$04 ; Switch to bank 4\r\nsta $321\r\ncli\r\n```\r\n\r\n; $C000 to $FFFF will be on bank 4 on eeprom bank because we did not set any others registers)\r\n\r\nif you want to switch to RAM mode :\r\n\r\n``` ca65\r\nsei\r\nlda $342\r\nora #%00100000 ; switch to ram set\r\nsta $342\r\nlda $321\r\nand #%11111000 ; Do mask for b0,b1, b2 of banking register\r\nora #$04 ; Switch to bank 4\r\nsta $321\r\n\r\ncli\r\n```\r\n\r\nif you want to switch to antoher set RAM bank :\r\n\r\n``` ca65\r\nsei\r\nlda #$01 ; set 1 instead of current set at boot (0)\r\nsta $343\r\nlda $342\r\nora #%00100000 ; switch to ram set\r\nsta $342\r\nlda #$04 ; switch to bank set of ram bank\r\nsta $321\r\ncli\r\n```\r\n\r\n"
},
{
"alpha_fraction": 0.5942263007164001,
"alphanum_fraction": 0.6394988298416138,
"avg_line_length": 34.51375961303711,
"blob_id": "305b92f5c3f675582ea5ab73f3b474a889ac1f7c",
"content_id": "a8eed190fa98601f08b7ff737d0ae3501e46798c",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Markdown",
"length_bytes": 15596,
"license_type": "no_license",
"max_line_length": 273,
"num_lines": 436,
"path": "/pandoc/parts/part1.md",
"repo_name": "orix-software/orix-software.github.io",
"src_encoding": "UTF-8",
"text": "---\ntitle: Orix/Twilighte board Manual\nauthor: v2022.2\ndate: rev 26/06/2023\n---\n\n{width=\"17cm\" height=\"12.749cm\"}\n\n\n\\newpage\n\n\n\n\n## EXTRA DOCUMENTATION\n\n===================\n\nRom loader and firmware twilighte board menu\n--------------------------------------------\n\nhttps://github.com/orix-software/systemd/blob/master/docs/pdf/systemd.pdf\n\n[]{#anchor}INTRODUCTION\n=======================\n\n[]{#anchor-1}What is new (v2023.2) ?\n-------------------------------------\n\nhttps://orix-software.github.io/update/2023_2/\n\n[]{#anchor-4}General informations\n---------------------------------\n\nThis documentation must be use for the use if orix version **2023.2**. Upgrade to the last version of Orix all the stuff working.\n\nDefinitions :\n\n* Orix is the OS working on the board.\n* Twilighte board is the hardware.\n\nOn [http://orix.oric.org](http://orix.oric.org/), you will have some youtube videos links showing how to use some functionnality.\n\nThe board has a firmware version (see twil command for more information). This firmware can be upgarded see\n« Hardware and firmware upgrade » section.\n\nThe card has a 512KB eeprom chip, and a 512KB RAM chip. Theres RAM and ROM areas are divided into \"bank\" of 16KB. That is why, there is 32 banks of ROM and 32 banks of RAM (use 'bank' tool, to see it)\n\nSome extra devices (TOM2, logitech joypad) are explained a bit in this manual, others hardware can work with the board.\n\n[]{#anchor-5}Features\n---------------------\n\n- .tap file fast loading (with multitap files)\n- Start Oric-1/Atmos tape file.\n- Write tap file.\n- Starts any ROM\n- Starts ftdos .dsk file\n- Starts Sedoric .dsk file\n- Joysticks support for a lot of games on atmos mode\n- No patch for tape file\n- in system : kernel update, roms and ram update (with\n [orixcfg](http://orix.oric.org/command-orixcfg/) binary)\n- 2 DB9 Joysticks (atari pinout)\n- 512KB of EEPROM (banking mode)\n- 512KB of RAM (banking mode)\n- read/write from sdcard (MAX 64GB) or usb drive (mass storage)\n- drag and drop from the PC to the oric : It will be available on the\n oric (with WIFI connexion) : It requires extra hardware with a\n raspberry pi zero\n- fast loading : 46KB per second. A game require less than one second\n to load and start\n- cumulus compatible with the help of an other amplibus board (not\n provided)\n\n\\section{}\n\n[]{#anchor-6}GETTING STARTED\n============================\n\n[]{#anchor-7}Content\n--------------------\n\n{width=\"6.08cm\" height=\"8.107cm\"}\n{width=\"10.509cm\" height=\"7.881cm\"}\n=================================================================================================================================================================================================================================================================================\n\n[]{#anchor-8}Physicals ports\n----------------------------\n\n{width=\"8.326cm\"\nheight=\"4.598cm\"}{width=\"9.181cm\"\nheight=\"4.262cm\"}\n\n[]{#anchor-9}Hardware limits\n----------------------------\n\nThe usb controler manage FAT32 only. Sdcard and usb key must be formatted with FAT32 filesystem. If you want to use pi zero gadget trick, you need to do a mkfs to FAT32 file system.\n\nAll tests had been done with samsung evo sdcard and sandisk usb key. A lot of sdcard works, and we did not see incompatibility with sdcard.\n\nSdcard controler and usb key controler can work with 32GB storage Max.\nBut it can handle 64 GB sdcard (tested). It can handle larger sdcard/usb key reader, but only 32 and 64 GB devices was used.\n\n[]{#anchor-10}Software limits\n-----------------------------\n\nThe sdcard/usb controler can handle long filename, but Orix handles 8+3 filename only.\n\n[]{#anchor-11}Information about joysticks part\n----------------------------------------------\n\nThe left port has only 1 button. The right port has 3 buttons. The\njoysticks pinout is atari pinout. You can use standard DB9 joystick. You\ncan also plug « TOM2 » hardware (not provided), it can connect a usb\nmouse or usb joypad (wireless) to theses ports. For example, logitech\njoypad F710 (wireless) works with TOM2.\n\nPlease note that TOM2 can only handle 2 buttons. It means that the third button can't work with TOM2 connected.\n\n{width=\"17cm\" height=\"3.902cm\"}\n--------------------------------------------------------------------------------------------\n\n{#anchor-12}First boot : Initialize the storage\n-------------------------------------------------------------------------------------------------------------------------------------------------\n\nWhen the board is sent, kernel is built with a default storage. In order\nto know which device is the default one, you can type « mount ». You can\nonly use one device at the same time, but you can swap easily theses\ndevices from command line.\n\nIf you see « sdcard », then sdcard will be read by default. You can\nchange it, with a command : « twil -u », it will switch to usbdrive. If\nyou want to have usb drive by default, you can program kernel with the\ntool « orixcfg ». See Orixcfg section.\n\nNow, if you know which device you will use by default, you can install\nall software on it.\n\nPlug the device on your PC (sdcard or usb key). If you have a pi zero w,\nyou can do this with drag and drop solution from the PC.\n\nDownload sdcard.tgz from this :\n<http://repo.orix.oric.org/dists/official/tgz/6502/>\n\nIt contains all software for orix there is others which are not\navailable in this archive.\n\nNow, use 7zip on your PC (or tar/gzip under linux), and unzip all files\nfrom this sdcard.tgz. Put all theses new files in your device root\nfolder.\n\nNow, you can insert the device (sdcard or usbkey -- or pi zero) in the\ntwilighte board and play.\n\n{width=\"10.659cm\" height=\"7.712cm\"}[]\n\n[]{#anchor-13}Upgrade from v2022.4 to v2022.4.1\n---------------------------------------------\n\nIf your orix version is below v2023.1 version, please go to annexes part at the\nend of this document, before you try to upgrade to v2023.2\n\n- Download\n <http://repo.orix.oric.org/dists/official/tgz/6502/sdcard.tgz>\n- untar/gunzip sdcard.tgz (use 7zip under windows) on your device usb\n or sdcard : It could require some time to copy because there is a\n lot of small files (tap, hlp etc)\n- you can start orix on real machine, and type :\n\n /\\#cd usr\\\n /usr\\#cd share\\\n /*usr/share\\#cd carts\\\n /usr/share/carts\\#cd 2023.2*\n\n Check orixcfg version\n\n orixcfg -v\n\n If orixcfg returns version 2023.2, use \"-k\" flag to upgrade kernel :\n\n orixcfg -k mycart.r64\n\n If you want to usr usb drive for default device :\n\n */usr/share/carts/2022.4\\#orixcfg -r -s 4 kernelus.r64*\n\n If you want to use sdcard for default device :\n\n /usr/share/carts/2022.4\\#orixcfg -r -s 4 kernelsd.r64\n\n- press 'y', and **wait until Orix reboots **\n\n (Don't switch off the Oric at this step)\n\n\n[]{#anchor-14}Optionnal step for upgrade\n----------------------------------------\n\nNow bank displays all banks from l to 64. It means that you should have\nsome strange bank signature for eeprom. Now an empty set is provided in\n*/usr/share/carts/2021.4 *folder. With Orixcfg you can initialize your\nset with this cart. Don't use « -s 4 » flag for orixcfg when you want to\nload emptyset.\n\n[]{#anchor-15}First step : type a command\n-----------------------------------------\n\nYou can access to available command from many ways :\n\n- From /bin folders, there is binary available on current device, 'ls'\n will show you available commands\n- From banks : type « help -b5 » you will see available commands\n\n\n\n\\newpage\n\\center\n\\Huge Commands\n\n\\flushleft\n\n\\normalsize\n\n\\newpage\n\n\n[]{#anchor-26}Basic10 & Basic11\n=====================\n\nLaunch\n------\n\nBasic10 starts Oric-1 rom with sdcard/usb key support\n\nBasic11 starts Atmos rom with sdcard/usb key support\n\nYou can type basic11 or press FUNCT+B to start\n\n[]{#anchor-27}Load a personal .tap file\n---------------------------------------\n\nWhen you starts basic11 commands, the default path is\n*« /home/basic11/ ». Each action on the basic11 mode will be done in\nthis folder (cload/csave). If you cload a tape file, it must be in\n« /home/basic11 » folder.*\n\n{width=\"7.544cm\" height=\"5.447cm\"}\n\nYou have downloaded a .tap file, and want to use it.\nThen, you can create a folder /*home*/basic11/\n\nUnder Orix\n\n/\\#mkdir home\\\n/\\#cd home\\\n/home\\#mkdir basic11\\\n/home\\#cd basic11\n\nPut you file in this folder from your PC, and start basic11 (you don't\nneed to be in the «/home/basic11 » folder to start basic11 with no\nparameter. By default, basic11 starts in « /home/basic11/ »\n\n[]{#anchor-28}Oric.org tape file\n--------------------------------\n\nWhen you downloaded sdcard.tgz and unzip it into sdcard or usbkey\ndevice, there is many tape file included in this archive. You don't need\nto move these type file, if you know the key, you can starts it from\ncommands line. In this case, it will load the correct basic1.1 rom to\nstart the tape file (see below), and the correct joystick configuration\nif it's correct.\n\n[]{#anchor-29}Oric.org tape file update\n---------------------------------------\n\nEach week a new software.tgz is generated. You can download it from\n« repo » and unzip it on the device. It will generate last tape file and\nlast joysticks configuration.\n\n[]{#anchor-30}Search a tape file from command line\n--------------------------------------------------\n\n{width=\"7.304cm\" height=\"5.398cm\"}[]{#anchor-31}\n\nBasic11 has also many.tap files inserted in sdcard.tgz\n\nTry to find the software with option -l\n\n/\\# basic11 -l\n\nIf you find your software, you can do perform **ctrl+c.**\n\nYou can type space to do a pause.\n\nOn that case, you can launch the tape file like :\n\n/\\# basic11 «KEYDISPLAYED\n\nWhen KEYDISPLAYED is the key displayed in key column. Please note that\nthe key must be in **UPPERCASE**\n\n[]{#anchor-32}Load a tap file from command line\n-----------------------------------------------\n\nNote that MYFILE must be in **UPPERCASE**\n\n/\\# basic11 «MYFILE\n\nIf MYFILE is in the oric.org database, it will launch the software with\nthe filename MYFILE.\n\nIf basic11 command does not find MYFILE in the oric.org database, it\nwill try to load it from /home/basic11/ folder.\n\n[]{#anchor-33}Save your program \n--------------------------------\n\n\\\nIf you start « basic11 » with no options, basic rom will starts and each\ncsave (or cload) actions will store files in « /*home/basic11 » folder*\n\n[]{#anchor-34}Start basic11 menu\n--------------------------------\n\n{width=\"7.384cm\"height=\"5.341cm\"}\n\nIf you type « basic11 -g » on command line or FUNCT+G,\nyou will have a menu with all software which have a download link on\noric.org (only atmos version and when a tape file is available).\n\n/\\#basic11 -g\n\nYou can use left and right letters to change to a new letter. If the\nletter is empty, it means that there is no available tap file for this\nletter.\n\nYou can use up and down link to navigate into software. If you press\nenter, the software will starts.\n\nNote that not all games are working yet. Some times, chars are\ncorrupted. If the joysticks does not works, there is two case :\n\n- the game does not call rom routine to manage keyboard\n- keyboard mapping is not done yet\n\nYou can use arrows to navigate into the menu :\n\n- up and down to select the software\n- right and left to switch to the menu letters\n\nSome letters are empty. It means that there is no software with tape\nfile available on oric.org for this letter\n\n[]{#anchor-35}Quit basic11\n--------------------------\n\nIf you want to quit basic11 from interpreter command line, you can type\n« QUIT ». This will force to reboot to Orix (you can also use reset\nbutton)\n\n[]{#anchor-36}How the .tap file starts\n--------------------------------------\n\nIf you only type « basic11 », this will start bank 6 (normal basic rom).\nThe default folder in that case is «/*home/basic11 »*\n\nIf you type « basic11 » with a tape file as an argument, there is 2\ncases\n\n1. The tape file (key) is already known in oric.org website, then\n basic11 try to find it in its databank file (/var/cache/basic11/\n folder). If the key is found, it will start the tape file located in\n «/usr/share/basic11/\\... »\n2. If the key is unknown, it will try to find it in «/home/basic11 »\n\nIf the tap file is in the oric.org db file, basic11 will load the\nsoftware configuration from the db software file (as joystick\nconfiguration, and the id of the rom). Basic11 load the right rom into\nram bank, override the default basic11 path to the tape file folder\n(« *usr/share/basic11/\\[firstletter software\\]. *\n\nIt means that if you load this kind of software and you can quit the\nsoftware, each file action in basic11 rom, will be performed in\n« usr/share/basic11/\\[firstletter software\\]. »\n\n[]{#anchor-37}Not working tapes (for instance)\n----------------------------------------------\n\n- All Oric-1 games can be started with FUNCT+L in ROM menu : start\n oric-1 (depending of your device), and put .tap files in\n /home/basic10\n- Software which does not work (25), but the number can be reduced in\n future release.\n\n ----------------------------- --------------------- ------------------------------------------------------------\n cobra Cobra pinball Damsel in distress\n Rush hour 4K\n Le diamant de l'ile maudite Durendal HU\\*BERT\n Hunchback Schtroumpfs Stanley (ROM 0,1 tested)\n Them Titan Visif\n Xenon III Dig Dog Elektro Storm\n Kilburn Encounter Le tresor du pirate L'aigle d'or (ROM 0,1 tested)\n Compatible (micropuce) Volcanic demo Clavidact\n DAO Cobra Soft CW-Morse The Hellion\n MARC Caspak Kryllis : when we lost one life, the game does not restart\n ----------------------------- --------------------- ------------------------------------------------------------\n\n[]{#anchor-38}Tape with altered charset\n---------------------------------------\n\n ------------ ------------ -------------------------\n Fire flash Scuba Dive 3D fongus (i,f letters)\n\n ------------ ------------ -------------------------\n\n[]{#anchor-39}Joysticks issues\n------------------------------\n\nWe did keyboard/joystick mapping for a lot of games, but we did not set\nthe keyboard mapping for all software. If you want to help us, contact\nus.\n\nSome game does not work because they handle their own keyboard routine.\nIt could be handle with hardware tricks but, it's not done.\n\nSome others games uses special keys (SHIFT, CTRL) for direction or the\nfirst button. Theses cases are not handle yet : but it could in the\nfuture.\n\n\n\\newpage\n"
},
{
"alpha_fraction": 0.6212624311447144,
"alphanum_fraction": 0.6338870525360107,
"avg_line_length": 32.20454406738281,
"blob_id": "2fa3bbb777a0cc8eb5e09e5be817a3d5c6f526f4",
"content_id": "2d613fcb3d2c8639e2711e66112f08608155255b",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Markdown",
"length_bytes": 1505,
"license_type": "no_license",
"max_line_length": 350,
"num_lines": 44,
"path": "/docs/developer_manual/orixsdk_macros/fopen.md",
"repo_name": "orix-software/orix-software.github.io",
"src_encoding": "UTF-8",
"text": "# FOPEN macro\r\n\r\n## description\r\n\r\nOpen a file.\r\n\r\n## usage\r\n\r\nfopen file, mode [,TELEMON] [,ptr] [,oom_msg_ptr] [,fail_value]\r\n\r\n- file may be: (ptr), address\r\n- if parameter 'ptr' is present, store resulting AX in ptr & ptr+1\r\n- if parameter 'oom_msg_ptr' is present, emit string pointed by 'oom_msg_ptr' and return if AX is $FFFF (ie XOPEN error)\r\n\r\n## Example\r\n\r\n```ca65\r\n .include \"telestrat.inc\" ; from cc65\r\n .include \"fcntl.inc\" ; from cc65\r\n .include \"../orix-sdk/macros/SDK_file.mac\"\r\n .include \"../orix-sdk/macros/SDK_print.mac\"\r\n\r\nstart:\r\n fopen (basic11_ptr2), O_RDONLY,,fp ; open the filename located in ptr 'basic11_ptr2', in readonly and store the fp in fp address\r\n cpx #$FF\r\n bne @read_maindb ; not null then start because we did not found a conf\r\n cmp #$FF\r\n bne @read_maindb ; not null then start because we did not found a conf\r\n\r\n print str_basic11_missing\r\n crlf ; Macro for return line\r\n lda #$FF\r\n ldx #$FF\r\n rts\r\nfp:\r\n .res 2\r\n@read_maindb:\r\n ; bla\r\n\r\n```\r\n\r\n!!! warning \"The filename/path address must not be in the rom. If it's the case, the string must be copied into main memory because Kernel overlap the ROM. fopen macro from SDK will produce an error, if the 'address' is in a ROM range (eg : $c000-$FFFF). If you use a ptr, macro can not detect it, and XOPEN primitive won't be able to open your file\"\r\n\r\nSee [XOPEN](../../../kernel/primitives/xopen) kernel primitive.\r\n"
},
{
"alpha_fraction": 0.5492424368858337,
"alphanum_fraction": 0.5568181872367859,
"avg_line_length": 10,
"blob_id": "75e7d769c05624fe116468284c7e1e523c81a969",
"content_id": "3c12ea26416333b70cb446ba0df0227363aafba3",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Markdown",
"length_bytes": 264,
"license_type": "no_license",
"max_line_length": 62,
"num_lines": 22,
"path": "/docs/developer_manual/orixsdk_macros/zap.md",
"repo_name": "orix-software/orix-software.github.io",
"src_encoding": "UTF-8",
"text": "# Zap\r\n\r\n## Description\r\n\r\nProduce a zap sound\r\n\r\n## Usage\r\n\r\nzap\r\n\r\n## Example\r\n\r\n```ca65\r\n .include \"telestrat.inc\"\r\n .include \"../orix-sdk/macros/SDK_sound.mac\"\r\n\r\n zap\r\n rts\r\n\r\n```\r\n\r\nCall [XZAP](../../../kernel/primitives/xzap/) kernel function.\r\n"
},
{
"alpha_fraction": 0.6234765648841858,
"alphanum_fraction": 0.6433611512184143,
"avg_line_length": 16.522472381591797,
"blob_id": "ba2271114a45bca9d27d068709749ae81dbbb88e",
"content_id": "4d3baec66775c9d0a025025c7575972f6c4615ab",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Markdown",
"length_bytes": 3118,
"license_type": "no_license",
"max_line_length": 115,
"num_lines": 178,
"path": "/doxygen/doxybook_output_vi/Structs/structvi__struct__data.md",
"repo_name": "orix-software/orix-software.github.io",
"src_encoding": "UTF-8",
"text": "---\ntitle: vi_struct_data\n\n---\n\n# vi_struct_data\n\n\n\n\n\n## Public Attributes\n\n| | Name |\n| -------------- | -------------- |\n| char | **[xpos_screen](Structs/structvi__struct__data.md#variable-xpos-screen)** |\n| char | **[ypos_screen](Structs/structvi__struct__data.md#variable-ypos-screen)** |\n| int | **[pos_file_addr](Structs/structvi__struct__data.md#variable-pos-file-addr)** |\n| char[4] | **[pos_file](Structs/structvi__struct__data.md#variable-pos-file)** |\n| char | **[posx_command_line](Structs/structvi__struct__data.md#variable-posx-command-line)** |\n| char[VI_MAX_LENGTH_FILENAME] | **[name_file_open](Structs/structvi__struct__data.md#variable-name-file-open)** |\n| | **[int](Structs/structvi__struct__data.md#variable-int)** |\n| | **[ptr_file_begin](Structs/structvi__struct__data.md#variable-ptr-file-begin)** |\n| char[4] | **[length_file](Structs/structvi__struct__data.md#variable-length-file)** |\n| int | **[pos_ptr_video_address](Structs/structvi__struct__data.md#variable-pos-ptr-video-address)** |\n| char[4] | **[file_number_of_line](Structs/structvi__struct__data.md#variable-file-number-of-line)** |\n| char | **[xpos_command_line](Structs/structvi__struct__data.md#variable-xpos-command-line)** |\n| char[39] | **[command_line_buffer](Structs/structvi__struct__data.md#variable-command-line-buffer)** |\n| int | **[ptr_last_char_file](Structs/structvi__struct__data.md#variable-ptr-last-char-file)** |\n| int | **[line_id](Structs/structvi__struct__data.md#variable-line-id)** |\n| char | **[xpos_text](Structs/structvi__struct__data.md#variable-xpos-text)** |\n| char | **[ypos_text](Structs/structvi__struct__data.md#variable-ypos-text)** |\n\n## Public Attributes Documentation\n\n### variable xpos_screen\n\n```ca65\nchar xpos_screen;\n```\n\n\nposition x of the cursor on the screen \n\n\n### variable ypos_screen\n\n```ca65\nchar ypos_screen;\n```\n\n\nposition y of the cursor on the screen \n\n\n### variable pos_file_addr\n\n```ca65\nint pos_file_addr;\n```\n\n\nposition on the file (address) \n\n\n### variable pos_file\n\n```ca65\nchar[4] pos_file;\n```\n\n\nposition in the file \n\n\n### variable posx_command_line\n\n```ca65\nchar posx_command_line;\n```\n\n\nposition on command line \n\n\n### variable name_file_open\n\n```ca65\nchar[VI_MAX_LENGTH_FILENAME] name_file_open;\n```\n\n\n### variable int\n\n```ca65\nint;\n```\n\n\n### variable ptr_file_begin\n\n```ca65\nptr_file_begin;\n```\n\n\nadress of the beginning of the file \n\n\n### variable length_file\n\n```ca65\nchar[4] length_file;\n```\n\n\nLength of the file \n\n\n### variable pos_ptr_video_address\n\n```ca65\nint pos_ptr_video_address;\n```\n\n\n### variable file_number_of_line\n\n```ca65\nchar[4] file_number_of_line;\n```\n\n\n### variable xpos_command_line\n\n```ca65\nchar xpos_command_line;\n```\n\n\n### variable command_line_buffer\n\n```ca65\nchar[39] command_line_buffer;\n```\n\n\n### variable ptr_last_char_file\n\n```ca65\nint ptr_last_char_file;\n```\n\n\n### variable line_id\n\n```ca65\nint line_id;\n```\n\n\n### variable xpos_text\n\n```ca65\nchar xpos_text;\n```\n\n\n### variable ypos_text\n\n```ca65\nchar ypos_text;\n```\n\n\n-------------------------------\n\nUpdated on 2022-12-15 at 14:20:17 +0100"
},
{
"alpha_fraction": 0.4703703820705414,
"alphanum_fraction": 0.5037037134170532,
"avg_line_length": 12.965517044067383,
"blob_id": "77c1dd6be3b842c42fe0587f43d485d07b089b84",
"content_id": "94328e816457bd998b28f3a0286a9ea94e833241",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Markdown",
"length_bytes": 810,
"license_type": "no_license",
"max_line_length": 76,
"num_lines": 58,
"path": "/docs/tools_docs/vi/Files/vi__decal__text_8s.md",
"repo_name": "orix-software/orix-software.github.io",
"src_encoding": "UTF-8",
"text": "---\ntitle: vi_decal_text.s\n\n---\n\n# vi_decal_text.s\n\n\n\n## Routine\n\n| | Name |\n| -------------- | -------------- |\n| | **[vi_decal_text](Files/vi__decal__text_8s.md#Routine-vi-decal-text)** |\n\n\n## Routine documentation\n\n### Routine vi_decal_text\n\n```ca65\nvi_decal_text\n```\n\n\n\n\n## Source code\n\n```ca65\n.proc vi_decal_text\n ; A & X address dest to copy\n\n sta DECCIB\n stx DECCIB+1\n\n lda vi_ptr_file_used ; address first char\n ldy vi_ptr_file_used+1\n sta DECDEB\n sty DECDEB+1\n\n ldy #vi_struct_data::ptr_last_char_file\n lda (vi_struct),y\n sta DECFIN\n iny\n lda (vi_struct),y\n sta DECFIN+1\n\n BRK_TELEMON XDECAL\n\n rts\n.endproc\n```\n\n\n-------------------------------\n\nUpdated on 2022-12-15 at 14:20:17 +0100\n"
},
{
"alpha_fraction": 0.5619834661483765,
"alphanum_fraction": 0.5826446413993835,
"avg_line_length": 16.538461685180664,
"blob_id": "82c40e040257e9969ba5a565b9cd883eabc2a637",
"content_id": "f75fc24df7bc8e72c3a93a5354aeacbf94fd9916",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Markdown",
"length_bytes": 484,
"license_type": "no_license",
"max_line_length": 65,
"num_lines": 26,
"path": "/docs/developer_manual/orixsdk_macros/initmainargs.md",
"repo_name": "orix-software/orix-software.github.io",
"src_encoding": "UTF-8",
"text": "\r\n# initmainargs\r\n\r\n## Description\r\n\r\nInitialize struct of the mainargs from current process\r\n\r\n## Usage\r\n\r\ninitmainargs ptr, ptr1, 0\r\n\r\n## Example\r\n\r\n```ca65\r\n .include \"telestrat.inc\"\r\n .include \"../orix-sdk/macros/SDK_mainargs.mac\"\r\n\r\n XMAINARGS = $2C\r\n XGETARGV = $2E\r\n argv := userzp ; 2 bytes\r\n argc := userzp+2 ; 1 byte\r\n\r\n initmainargs argv, argc, 0\r\n rts\r\n```\r\n\r\nCall [XMAINARGS](../../../kernel/primitives/xmainargs/) function.\r\n"
},
{
"alpha_fraction": 0.5487805008888245,
"alphanum_fraction": 0.5595818758010864,
"avg_line_length": 20.076923370361328,
"blob_id": "26c3f5c9cd5cda4614db651237c4597e0e577492",
"content_id": "7424505ee4f160034dff544746843fab8356e6f2",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Markdown",
"length_bytes": 2870,
"license_type": "no_license",
"max_line_length": 42,
"num_lines": 130,
"path": "/docs/user_manual/overview.md",
"repo_name": "orix-software/orix-software.github.io",
"src_encoding": "UTF-8",
"text": "# Overview\r\n\r\n```markmap\r\n\r\n# Orix\r\n\r\n## Users profile\r\n\r\n### tape file launchers (oric1/atmos)\r\n\r\n* [loader](../../commands/loader)\r\n* [basic11](../../commands/basic11)\r\n* [basic10](../../commands/basic10)\r\n\r\n### Roms launchers\r\n\r\n* [loader](../../commands/loader)\r\n* [bank](../../commands/bank)\r\n\r\n### .dsk launchers\r\n\r\n* [ftdos](../../commands/ftdos)\r\n\r\n### Music launchers\r\n\r\n* [mym](../../commands/mym)\r\n\r\n### Floppybuilder launchers\r\n\r\n* [blakes7](../../commands/blakes7)\r\n* [asm2k2](../../commands/asm2k2)\r\n* [barbitoric](../../commands/barboric)\r\n* [quintessential](../../commands/quintes)\r\n* [orictech](../../commands/orictech)\r\n* [30years](../../commands/30years)\r\n\r\n### .dsk management\r\n\r\n* [dsk-util](../../commands/dsk-util)\r\n* [ftdos](../../commands/ftdos)\r\n* [readdsk](../../commands/readdsk)\r\n* [raw2dsk](../../commands/raw2dsk)\r\n\r\n### Real floppy drive launchers\r\n\r\n* [bootfd](../../commands/bootfd)\r\n\r\n## Orix manager profiles\r\n\r\n### .dsk file management\r\n\r\n* [dsk-util](../../commands/dsk-util)\r\n* [ftdos](../../commands/ftdos)\r\n* [readdsk](../../commands/readdsk)\r\n* [raw2dsk](../../commands/raw2dsk)\r\n\r\n### Load rom/manage banks\r\n\r\n* [bank](../../commands/bank)\r\n* [help](../../commands/help)\r\n* [hexdump](../../commands/hexdump)\r\n* [orixcfg](../../commands/orixcfg)\r\n\r\n### File system management\r\n\r\n* [cat](../../commands/cat)\r\n* [cd](../../commands/cd)\r\n* [cp](../../commands/cp)\r\n* [cksum](../../commands/cksum)\r\n* [df](../../commands/cp)\r\n* [file](../../commands/file)\r\n* [grep](../../commands/grep)\r\n* [ls](../../commands/ls)\r\n* [mkdir](../../commands/mkdir)\r\n* [mount](../../commands/mount)\r\n* [rm](../../commands/rm)\r\n* [touch](../../commands/touch)\r\n* [twil](../../commands/twil)\r\n* [untar](../../commands/untar)\r\n\r\n### Process management\r\n\r\n* [ps](../../commands/ps)\r\n* [pstree](../../commands/pstree)\r\n\r\n### Orix management\r\n\r\n* [env](../../commands/env)\r\n* [ioports](../../commands/ioports)\r\n* [orixcfg](../../commands/orixcfg)\r\n* [reboot](../../commands/reboot)\r\n* [setfont](../../commands/setfont)\r\n* [submit](../../commands/submit)\r\n* [strerr](../../commands/strerr)\r\n* [twil](../../commands/twil)\r\n* [uname](../../commands/uname)\r\n\r\n### Manuals\r\n\r\n* [man](../../commands/man)\r\n\r\n### Rom management\r\n\r\n* [orixcfg](../../commands/orixcfg)\r\n* [loader](../../commands/loader)\r\n\r\n### Basic tools\r\n\r\n* [basic10](../../commands/basic10)\r\n* [basic11](../../commands/basic11)\r\n* [list](../../commands/list)\r\n* [viewscr](../../commands/viewscr)\r\n\r\n### Debugging\r\n\r\n* [cksum](../../commands/cksum)\r\n* [orixcfg](../../commands/orixcfg)\r\n* [hexdump](../../commands/hexdump)\r\n* [sterr](../../commands/strerr)\r\n\r\n### Visualization tools\r\n\r\n* [hexdump](../../commands/hexdump)\r\n* [list](../../commands/list)\r\n* [viewhrs](../../commands/viewhrs)\r\n* [viewscr](../../commands/viewscr)\r\n* [man](../../commands/man)\r\n* [more](../../commands/more)\r\n\r\n```\r\n"
},
{
"alpha_fraction": 0.5189048051834106,
"alphanum_fraction": 0.5593220591545105,
"avg_line_length": 15.67391300201416,
"blob_id": "41e0a5e6e4ffbd1a383f347979db9df95eb9d000",
"content_id": "a4d70a992bf5666667ff2072151ee86c727f412d",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Markdown",
"length_bytes": 767,
"license_type": "no_license",
"max_line_length": 120,
"num_lines": 46,
"path": "/doxygen/doxybook_output_vi/Files/vi__editor__switch__off__cursor_8s.md",
"repo_name": "orix-software/orix-software.github.io",
"src_encoding": "UTF-8",
"text": "---\ntitle: /Routines/vi_editor_switch_off_cursor.s\n\n---\n\n# /Routines/vi_editor_switch_off_cursor.s\n\n\n\n## Routine\n\n| | Name |\n| -------------- | -------------- |\n| | **[vi_editor_switch_off_cursor](Files/vi__editor__switch__off__cursor_8s.md#Routine-vi-editor-switch-off-cursor)** |\n\n\n## Routine documentation\n\n### Routine vi_editor_switch_off_cursor\n\n```ca65\nvi_editor_switch_off_cursor\n```\n\n\n\n\n## Source code\n\n```ca65\n.proc vi_editor_switch_off_cursor\n ldy #vi_struct_data::xpos_screen\n lda (vi_struct),y\n tay\n lda (vi_ptr_screen),y ; display cursor\n and #%01111111\n sta (vi_ptr_screen),y ; display cursor\n\n rts\n.endproc\n```\n\n\n-------------------------------\n\nUpdated on 2022-12-15 at 14:20:17 +0100\n"
},
{
"alpha_fraction": 0.562881588935852,
"alphanum_fraction": 0.5909646153450012,
"avg_line_length": 16.80434799194336,
"blob_id": "0902ab2889ab7508437eb4f32c8c93f4f08831d8",
"content_id": "d5ff66c8d1e4dbdd57ec949a72f22c2da01e266c",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Markdown",
"length_bytes": 819,
"license_type": "no_license",
"max_line_length": 116,
"num_lines": 46,
"path": "/doxygen/doxybook_output/Files/vi__length__file__plus__plus_8s.md",
"repo_name": "orix-software/orix-software.github.io",
"src_encoding": "UTF-8",
"text": "---\ntitle: /mnt/c/Users/plifp/OneDrive/oric/projets/orix-software/vi/src/functions/subfunc/vi/vi_length_file_plus_plus.s\n\n---\n\n# /mnt/c/Users/plifp/OneDrive/oric/projets/orix-software/vi/src/functions/subfunc/vi/vi_length_file_plus_plus.s\n\n\n\n## Functions\n\n| | Name |\n| -------------- | -------------- |\n| | **[vi_length_file_plus_plus](Files/vi__length__file__plus__plus_8s.md#function-vi-length-file-plus-plus)**() |\n\n\n## Functions Documentation\n\n### function vi_length_file_plus_plus\n\n```cpp\nvi_length_file_plus_plus()\n```\n\n\n\n\n## Source code\n\n```cpp\n.proc vi_length_file_plus_plus\n ; add length_file=length_file+1\n inc vi_length_file\n bne @S1\n inc vi_length_file+1\n\n@S1:\n rts\n\n.endproc\n```\n\n\n-------------------------------\n\nUpdated on 2022-12-15 at 11:48:27 +0100\n"
},
{
"alpha_fraction": 0.6424116492271423,
"alphanum_fraction": 0.662162184715271,
"avg_line_length": 23.3157901763916,
"blob_id": "bff67ab76a04debd81771470bec1cfa32aff95e1",
"content_id": "0841bb7e3600139bd7796f56ba8af752866b1c31",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Markdown",
"length_bytes": 962,
"license_type": "no_license",
"max_line_length": 161,
"num_lines": 38,
"path": "/docs/kernel/primitives/xexec.md",
"repo_name": "orix-software/orix-software.github.io",
"src_encoding": "UTF-8",
"text": "# XEXEC\r\n\r\n## Description\r\n\r\nStart a binary (located in a ROM or on the current device)\r\n\r\n## Input\r\n\r\nA & Y the ptr of the string.\r\n\r\n## Output\r\n\r\nReturns an error if the binary is not found\r\n\r\n## Modify\r\n\r\n* RES, RESB (kernel_create_process), TR0, TR1, TR4 (kernel_create_process), TR5 (kernel_create_process)\r\n* KERNEL_ERRNO (kernel_create_process), KERNEL_XKERNEL_CREATE_PROCESS_TMP (kernel_create_process),kernel_process_struct::kernel_pid_list (kernel_create_process)\r\n* DECDEB, DECFIN, DECCIB,DECTRV,ACC1M\r\n* VEXBNK, BUFEDT, BNKOLD, KERNEL_TMP_XEXEC, BNK_TO_SWITCH, KERNEL_KERNEL_XEXEC_BNKOLD\r\n\r\n## Example\r\n\r\n```ca65\r\n .include \"telestrat.inc\"\r\n\r\n lda #<str\r\n ldy #>str\r\n ldx #$00 ; Fork\r\n BRK_TELEMON XEXEC\r\n rts\r\nstr:\r\n .asciiz \"mybin\"\r\n```\r\n\r\n!!! fail \"XEXEC does not manage './' or '../' before kernel v2023.2\"\r\n\r\n!!! warning \"./ calls and /mypath/mybinary calls are available since kernel v2023.2, but '../' is not available\"\r\n"
},
{
"alpha_fraction": 0.6853333115577698,
"alphanum_fraction": 0.7066666483879089,
"avg_line_length": 16.809524536132812,
"blob_id": "0d8865524afc9998b71e28a479934eee8964df66",
"content_id": "d7f4626df95f83ae3fa4f48c101ff402881c05c7",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Markdown",
"length_bytes": 375,
"license_type": "no_license",
"max_line_length": 62,
"num_lines": 21,
"path": "/docs/commands/hexdump.md",
"repo_name": "orix-software/orix-software.github.io",
"src_encoding": "UTF-8",
"text": "# Command: hexdump\n\n### hexdump utility\n\n## SYNOPSYS\n+ hexdump -v\n+ hexdump -h\n+ hexdump -b bankid [,offset]\n+ hexdump file\n\n## EXAMPLES\n+ hexdump /bin/hexdump\n+ hexdump -b 7\n+ hexdump -b 33,16128\n\n## DESCRIPTION\nDisplay file or bank contents in hexadecimal.\nYou can use [SPACE] to pause the display ou [CTRL]+C to abort.\n\n## SOURCE\nhttps://github.com/orix-software/hexdump\n\n"
},
{
"alpha_fraction": 0.5356773734092712,
"alphanum_fraction": 0.5667011141777039,
"avg_line_length": 16.581817626953125,
"blob_id": "fadbad08a23706a6e6e31f520913f86628e86c39",
"content_id": "28063b3a91cbc683257b8ae1c5c67f84dd944ac7",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Markdown",
"length_bytes": 967,
"license_type": "no_license",
"max_line_length": 114,
"num_lines": 55,
"path": "/doxygen/doxybook_output/Files/vi__ypos__screen__sub__sub_8s.md",
"repo_name": "orix-software/orix-software.github.io",
"src_encoding": "UTF-8",
"text": "---\ntitle: /mnt/c/Users/plifp/OneDrive/oric/projets/orix-software/vi/src/functions/subfunc/vi/vi_ypos_screen_sub_sub.s\n\n---\n\n# /mnt/c/Users/plifp/OneDrive/oric/projets/orix-software/vi/src/functions/subfunc/vi/vi_ypos_screen_sub_sub.s\n\n\n\n## Functions\n\n| | Name |\n| -------------- | -------------- |\n| | **[vi_ypos_screen_sub_sub](Files/vi__ypos__screen__sub__sub_8s.md#function-vi-ypos-screen-sub-sub)**() |\n\n\n## Functions Documentation\n\n### function vi_ypos_screen_sub_sub\n\n```cpp\nvi_ypos_screen_sub_sub()\n```\n\n\n\n\n## Source code\n\n```cpp\n; A returns 1 if y=0\n; A returns 0 if y!=0\n\n.proc vi_ypos_screen_sub_sub\n ; ypos_screen=ypos_screen-1\n ldy #vi_struct_data::ypos_screen\n\n lda (vi_struct),y\n beq @no_substract\n sec\n sbc #$01\n\n sta (vi_struct),y\n lda #$00\n rts\n@no_substract:\n lda #$01\n rts\n.endproc\n```\n\n\n-------------------------------\n\nUpdated on 2022-12-15 at 11:48:27 +0100\n"
},
{
"alpha_fraction": 0.6315789222717285,
"alphanum_fraction": 0.6315789222717285,
"avg_line_length": 14.65217399597168,
"blob_id": "a2c4c51382fe3c2c86ee386f47710ee3b3e2f9eb",
"content_id": "0456562cc373530a405f4374718689ca0200110e",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Markdown",
"length_bytes": 361,
"license_type": "no_license",
"max_line_length": 48,
"num_lines": 23,
"path": "/docs/commands/cksum.md",
"repo_name": "orix-software/orix-software.github.io",
"src_encoding": "UTF-8",
"text": "# Command: cksum\n\n### checksum utility\n\n## SYNOPSYS\n+ cksum -h|-v\n+ cksum file [...]\n+ cksum @batchfile\n\n## EXAMPLES\n+ cksum /bin/cksum\n\n## DESCRIPTION\nchecksum and count the bytes in a file\n\n## OPTIONS\n* -h\n show this help message and exit\n* -v\n display program version and exit\n\n## SOURCE\nhttps://github.com/orix-software/cksum\n\n"
},
{
"alpha_fraction": 0.727748692035675,
"alphanum_fraction": 0.727748692035675,
"avg_line_length": 10.235294342041016,
"blob_id": "db05333d099d7055baff5c7de4baaebcdfcec2f1",
"content_id": "f773ed404289c9bc1eb830bc4d16d866e471c2fa",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Markdown",
"length_bytes": 191,
"license_type": "no_license",
"max_line_length": 72,
"num_lines": 17,
"path": "/docs/commands/echo.md",
"repo_name": "orix-software/orix-software.github.io",
"src_encoding": "UTF-8",
"text": "# echo\n\n## Introduction\n\nDisplay a message\n\n## SYNOPSYS\n\n+ #echo hello\n\n## DESCRIPTION\n\nDisplay a message\n\n## SOURCE\n\nhttps://github.com/orix-software/shell/blob/master/src/commands/echo.asm\n"
},
{
"alpha_fraction": 0.260869562625885,
"alphanum_fraction": 0.417391300201416,
"avg_line_length": 6.1875,
"blob_id": "30b556e42384ed0021fba24172bf0d9ae4176c19",
"content_id": "1a7d6f411ee4431d3ceb41cdef98cb1c2a15e4ab",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Markdown",
"length_bytes": 115,
"license_type": "no_license",
"max_line_length": 39,
"num_lines": 16,
"path": "/docs/tools_docs/vi/index_groups.md",
"repo_name": "orix-software/orix-software.github.io",
"src_encoding": "UTF-8",
"text": "---\ntitle: Modules\n\n---\n\n# Modules\n\n\n\n\n\n\n\n-------------------------------\n\nUpdated on 2022-12-15 at 14:20:17 +0100\n"
},
{
"alpha_fraction": 0.7044335007667542,
"alphanum_fraction": 0.7044335007667542,
"avg_line_length": 21.55555534362793,
"blob_id": "be44d17d07b4d9d4bb9f27585523898109f1e94b",
"content_id": "ded657cbb6c6802f670a792e2a911c3e8c0158fe",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Markdown",
"length_bytes": 203,
"license_type": "no_license",
"max_line_length": 75,
"num_lines": 9,
"path": "/docs/menu.md",
"repo_name": "orix-software/orix-software.github.io",
"src_encoding": "UTF-8",
"text": "# Getting started\n\n## Users manuals\n\n[Users manuals](../commands/all/){ .md-button .md-button--primary}\n\n## developers manual\n\n[Developers manuals](../developer_manual/){ .md-button .md-button--primary}\n"
},
{
"alpha_fraction": 0.4680851101875305,
"alphanum_fraction": 0.7234042286872864,
"avg_line_length": 8.199999809265137,
"blob_id": "abd9eeba20809426f6917e4cbb987474a6cb1229",
"content_id": "7b0f79cf176cc055bffe4ad1513c81ae6c5bbee1",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Markdown",
"length_bytes": 47,
"license_type": "no_license",
"max_line_length": 21,
"num_lines": 5,
"path": "/docs/commands/born1983.md",
"repo_name": "orix-software/orix-software.github.io",
"src_encoding": "UTF-8",
"text": "# Born1983\n\nLaunch born 1983 demo\n\n/#born1983\n\n"
},
{
"alpha_fraction": 0.4947735071182251,
"alphanum_fraction": 0.5319396257400513,
"avg_line_length": 14.105262756347656,
"blob_id": "110aab1f34421933a153e8e51eb7756c0005818d",
"content_id": "c1c285020fd2ae69273f04a8bb777fd04db43e19",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Markdown",
"length_bytes": 861,
"license_type": "no_license",
"max_line_length": 143,
"num_lines": 57,
"path": "/doxygen/doxybook_output_vi/Files/vi__shift__line__left__to__right__editor_8s.md",
"repo_name": "orix-software/orix-software.github.io",
"src_encoding": "UTF-8",
"text": "---\ntitle: vi_shift_line_left_to_right_editor.s\n\n---\n\n# vi_shift_line_left_to_right_editor.s\n\n\n\n## Routine\n\n| | Name |\n| -------------- | -------------- |\n| | **[vi_shift_line_left_to_right_editor](Files/vi__shift__line__left__to__right__editor_8s.md#Routine-vi-shift-line-left-to-right-editor)** |\n\n\n## Routine documentation\n\n### Routine vi_shift_line_left_to_right_editor\n\n```ca65\nvi_shift_line_left_to_right_editor\n```\n\n\n\n\n## Source code\n\n```ca65\n.proc vi_shift_line_left_to_right_editor\n ; A the line to scroll\n\n tay\n lda TABLE_LOW_TEXT,y\n sta vi_ptr1\n lda TABLE_HIGH_TEXT,y\n sta vi_ptr1+1\n\n\n ldy #38\n@L1:\n lda (vi_ptr1),y\n iny\n sta (vi_ptr1),y\n dey\n dey\n bpl @L1\n\n rts\n.endproc\n```\n\n\n-------------------------------\n\nUpdated on 2022-12-15 at 14:20:17 +0100\n"
},
{
"alpha_fraction": 0.49929079413414,
"alphanum_fraction": 0.6260047554969788,
"avg_line_length": 20.363636016845703,
"blob_id": "884b5b070e19e5d1706f383eaeaa724e569156c0",
"content_id": "5a23f3d639fc8d088320d4ce762a7e22b63dbea1",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Markdown",
"length_bytes": 2115,
"license_type": "no_license",
"max_line_length": 85,
"num_lines": 99,
"path": "/hardware/docs/twilighteboard/fdcWifiEsp.md",
"repo_name": "orix-software/orix-software.github.io",
"src_encoding": "UTF-8",
"text": "# fdcsd\n\nI/O : \n* $315 : command port\n* $316 : data port\n\n\n\n\n\n## ESP32_CHECK_EXIST \n\nESP32_CHECK_EXIST = $06\n\nReturn data inverted bit\n\n## ESP32_GET_IC_VER\n\nESP32_GET_IC_VER = $01\n\nReturn version\n\n## ESP32_SET_MODE\n\nCH376_SET_MODE = $15\n\nSet mode \n\n\n## ESP32_SET_MODE_CODE_FDC\n\n## ESP32_LAUNCH_WIFI_SCAN $02\n\n* interrupt \n* return a struct : 2 bytes for the length of all ssid, and all sid\n\n\n\nESP32_DETECTED = $AA\n\n\n\nCH376_CMD_NONE = $00\n\n\nCH376_GET_ENTER_SLEEP = $03\nCH276_SET_USB_SPEED = $04 \nCH376_RESET_ALL = $05\nCH376_GET_FILE_SIZE = $0C ; Get the current file length\nCH376_SET_USB_MODE = $15\nCH376_GET_STATUS = $22\nCH376_RD_USB_DATA0 = $27\nCH376_WR_USB_DATA = $2C\nCH376_CMD_WR_REQ_DATA = $2D\nCH376_SET_FILE_NAME = $2F\nCH376_DISK_CONNECT = $30 ; check the disk connection status\nCH376_DISK_MOUNT = $31\nCH376_FILE_OPEN = $32\nCH376_FILE_ENUM_GO = $33\nCH376_CMD_FILE_CREATE = $34\nCH376_FILE_ERASE = $35\nCH376_FILE_CLOSE = $36\nCH376_BYTE_LOCATE = $39\nCH376_BYTE_READ = $3A\nCH376_BYTE_RD_GO = $3B\nCH376_BYTE_WRITE = $3C\nCH376_BYTE_WR_GO = $3D\nCH376_DISK_CAPACITY = $3E\nCH376_DISK_QUERY = $3F\nCH376_DIR_CREATE = $40\nCH376_SET_ADRESS = $45\nCH376_GET_DESCR = $46\nCH376_DISK_RD_GO = $55\n\n; CODE FOR CH376_SET_USB_MODE \n\nCH376_SET_USB_MODE_CODE_SDCARD = $03\n; The code of 06H means switch to valid USB-HOST, produce SOF package automatically. \nCH376_SET_USB_MODE_CODE_USB_HOST_SOF_PACKAGE_AUTOMATICALLY = $06\n\nCH376_USB_INT_SUCCESS\t\t= $14\nCH376_USB_INT_CONNECT\t\t= $15\nCH376_USB_INT_DISCONNECT\t= $16\nCH376_USB_INT_BUF_OVER\t\t= $17\nCH376_USB_INT_USB_READY\t\t= $18\nCH376_USB_INT_DISK_READ\t\t= $1d\nCH376_USB_INT_DISK_WRITE\t= $1e\nCH376_USB_INT_DISK_ERR\t\t= $1f\n\nCH376_ERR_OPEN_DIR = $41\nCH376_ERR_MISS_FILE = $42\nCH376_ERR_FOUND_NAME\t\t= $43\nCH376_ERR_DISK_DISCON\t = $82\nCH376_ERR_LARGE_SECTOR\t = $84\nCH376_ERR_TYPE_ERROR\t\t= $92\nCH376_ERR_BPB_ERROR\t\t\t= $A1\nCH376_ERR_DISK_FULL\t\t\t= $B1\nCH376_ERR_FDT_OVER\t\t = $B2\nCH376_ERR_FILE_CLOSE\t\t= $B4\n"
},
{
"alpha_fraction": 0.603225827217102,
"alphanum_fraction": 0.6161290407180786,
"avg_line_length": 16.235294342041016,
"blob_id": "21f37f5c19ef20ae1c0af73a0613b4e9892b0b97",
"content_id": "ba2b5e7fa5b873b9aa26b3525bad58720bd31dd1",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Markdown",
"length_bytes": 310,
"license_type": "no_license",
"max_line_length": 96,
"num_lines": 17,
"path": "/docs/kernel/primitives/xrdw0.md",
"repo_name": "orix-software/orix-software.github.io",
"src_encoding": "UTF-8",
"text": "# XRDW0\r\n\r\n## Usage\r\n\r\nWait a key from the keyboard\r\n\r\n## Example\r\n\r\n```ca65\r\n .include \"telestrat.inc\"\r\n\r\n BRK_KERNEL XRDW0\r\n ; When a key is pressed, A contains the ascii of the value\r\n rts\r\n```\r\n\r\n!!! tip \"See [cgetc](../../developer_manual/orixsdk_macros/cgetc) macro from orix-sdk to use it\"\r\n"
},
{
"alpha_fraction": 0.4431487023830414,
"alphanum_fraction": 0.48250728845596313,
"avg_line_length": 11.94339656829834,
"blob_id": "df944b844462f36f850c6fcc3857b05ba8ea1f35",
"content_id": "b0faaf6e998b4c336b81c5bc3df360f1b05efa90",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Markdown",
"length_bytes": 686,
"license_type": "no_license",
"max_line_length": 89,
"num_lines": 53,
"path": "/docs/tools_docs/vi/Files/vi__scroll__to__left_8s.md",
"repo_name": "orix-software/orix-software.github.io",
"src_encoding": "UTF-8",
"text": "---\ntitle: vi_scroll_to_left.s\n\n---\n\n# vi_scroll_to_left.s\n\n\n\n## Routine\n\n| | Name |\n| -------------- | -------------- |\n| | **[vi_scroll_to_left](Files/vi__scroll__to__left_8s.md#Routine-vi-scroll-to-left)** |\n\n\n## Routine documentation\n\n### Routine vi_scroll_to_left\n\n```ca65\nvi_scroll_to_left\n```\n\n\n\n\n## Source code\n\n```ca65\n.proc vi_scroll_to_left\n pha\n\n ldy #vi_struct_data::xpos_screen\n lda (vi_struct),y\n tay\n@L3:\n lda (vi_ptr_screen),y\n dey\n sta (vi_ptr_screen),y\n iny\n iny\n cpy #39\n bne @L3\n pla\n rts\n.endproc\n```\n\n\n-------------------------------\n\nUpdated on 2022-12-15 at 14:20:17 +0100\n"
},
{
"alpha_fraction": 0.6440422534942627,
"alphanum_fraction": 0.6621417999267578,
"avg_line_length": 18.15151596069336,
"blob_id": "735a0afcf58dc7da1f92aaad5dcf82ab97f9c176",
"content_id": "d80e6c8c8a4b50c4733aa5c44b68f25abd7fab9f",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Markdown",
"length_bytes": 663,
"license_type": "no_license",
"max_line_length": 125,
"num_lines": 33,
"path": "/docs/samples/c_samples/mkdir.md",
"repo_name": "orix-software/orix-software.github.io",
"src_encoding": "UTF-8",
"text": "# Create a folder with cc65\r\n\r\nFor instance mkdir in cc65 telestrat target is bugged, here is a work around. Create a file _omkdir.s. It must contains : \r\n\r\nmkdir kernel primitive does not support absolute path, and it can only create folder in CWD.\r\n\r\n```ca65\r\n.include \"telestrat.inc\"\r\n\r\n.export _omkdir\r\n\r\n.importzp tmp1\r\n\r\n.proc _omkdir\r\n stx tmp1\r\n ldy tmp1\r\n\r\n BRK_TELEMON XMKDIR\r\n rts\r\n.endproc\r\n```\r\n\r\nAnd now, you C code, you can do (don't forget to add _omkdir.s to your command line to build _omkdir.s when you launch cl65):\r\n\r\n```c\r\nextern void omkdir(unsigned char *path);\r\n\r\n\r\nint main() {\r\n mkdir (\"myfolder\");\r\n return 0;\r\n}\r\n```"
},
{
"alpha_fraction": 0.6328375935554504,
"alphanum_fraction": 0.6473070979118347,
"avg_line_length": 73.2686538696289,
"blob_id": "500230b387d1673d12798c427cc7c716882ef1e5",
"content_id": "53ee7d56f7e240371e37534ccd330f0acf106ca1",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Markdown",
"length_bytes": 4976,
"license_type": "no_license",
"max_line_length": 180,
"num_lines": 67,
"path": "/doxygen/doxybook_output_vi/Files/dir_a5544c2bf0b70f8d417c4d3bfea04409.md",
"repo_name": "orix-software/orix-software.github.io",
"src_encoding": "UTF-8",
"text": "---\ntitle: /Routines/subfunc/vi\n\n---\n\n# /Routines/subfunc/vi\n\n\n\n## Files\n\n| Name |\n| -------------- |\n| **[vi_add_char_to_text.s](Files/vi__add__char__to__text_8s.md#file-vi-add-char-to-text.s)** |\n| **[vi_check_0A.s](Files/vi__check__0A_8s.md#file-vi-check-0a.s)** |\n| **[vi_check_beginning_of_file.s](Files/vi__check__beginning__of__file_8s.md#file-vi-check-beginning-of-file.s)** |\n| **[vi_check_eof.s](Files/vi__check__eof_8s.md#file-vi-check-eof.s)** |\n| **[vi_check_if_previous_line_was_truncated.s](Files/vi__check__if__previous__line__was__truncated_8s.md#file-vi-check-if-previous-line-was-truncated.s)** |\n| **[vi_check_inserted_char_overflow_the_max_column.s](Files/vi__check__inserted__char__overflow__the__max__column_8s.md#file-vi-check-inserted-char-overflow-the-max-column.s)** |\n| **[vi_clear_command_line.s](Files/vi__clear__command__line_8s.md#file-vi-clear-command-line.s)** |\n| **[vi_compute_last_text_line.s](Files/vi__compute__last__text__line_8s.md#file-vi-compute-last-text-line.s)** |\n| **[vi_compute_video_adress.s](Files/vi__compute__video__adress_8s.md#file-vi-compute-video-adress.s)** |\n| **[vi_copy_arg1_to_name_file_open.s](Files/vi__copy__arg1__to__name__file__open_8s.md#file-vi-copy-arg1-to-name-file-open.s)** |\n| **[vi_decal_text.s](Files/vi__decal__text_8s.md#file-vi-decal-text.s)** |\n| **[vi_display_char.s](Files/vi__display__char_8s.md#file-vi-display-char.s)** |\n| **[vi_display_file_opened.s](Files/vi__display__file__opened_8s.md#file-vi-display-file-opened.s)** |\n| **[vi_fill_last_line.s](Files/vi__fill__last__line_8s.md#file-vi-fill-last-line.s)** |\n| **[vi_init_vi_struct.s](Files/vi__init__vi__struct_8s.md#file-vi-init-vi-struct.s)** |\n| **[vi_key_del.s](Files/vi__key__del_8s.md#file-vi-key-del.s)** |\n| **[vi_key_down.s](Files/vi__key__down_8s.md#file-vi-key-down.s)** |\n| **[vi_key_enter.s](Files/vi__key__enter_8s.md#file-vi-key-enter.s)** |\n| **[vi_key_left.s](Files/vi__key__left_8s.md#file-vi-key-left.s)** |\n| **[vi_key_right.s](Files/vi__key__right_8s.md#file-vi-key-right.s)** |\n| **[vi_key_up.s](Files/vi__key__up_8s.md#file-vi-key-up.s)** |\n| **[vi_length_file_plus_plus.s](Files/vi__length__file__plus__plus_8s.md#file-vi-length-file-plus-plus.s)** |\n| **[vi_length_file_sub_sub.s](Files/vi__length__file__sub__sub_8s.md#file-vi-length-file-sub-sub.s)** |\n| **[vi_ptr_file_used_plus_plus.s](Files/vi__ptr__file__used__plus__plus_8s.md#file-vi-ptr-file-used-plus-plus.s)** |\n| **[vi_ptr_file_used_plus_plus_and_check_eof.s](Files/vi__ptr__file__used__plus__plus__and__check__eof_8s.md#file-vi-ptr-file-used-plus-plus-and-check-eof.s)** |\n| **[vi_ptr_file_used_sub_sub.s](Files/vi__ptr__file__used__sub__sub_8s.md#file-vi-ptr-file-used-sub-sub.s)** |\n| **[vi_ptr_last_char_add.s](Files/vi__ptr__last__char__add_8s.md#file-vi-ptr-last-char-add.s)** |\n| **[vi_ptr_last_char_plus_plus.s](Files/vi__ptr__last__char__plus__plus_8s.md#file-vi-ptr-last-char-plus-plus.s)** |\n| **[vi_ptr_last_char_sub_sub.s](Files/vi__ptr__last__char__sub__sub_8s.md#file-vi-ptr-last-char-sub-sub.s)** |\n| **[vi_scroll_from_left_to_right_full_line.s](Files/vi__scroll__from__left__to__right__full__line_8s.md#file-vi-scroll-from-left-to-right-full-line.s)** |\n| **[vi_scroll_to_left.s](Files/vi__scroll__to__left_8s.md#file-vi-scroll-to-left.s)** |\n| **[vi_search_next_line.s](Files/vi__search__next__line_8s.md#file-vi-search-next-line.s)** |\n| **[vi_search_previous_cr.s](Files/vi__search__previous__cr_8s.md#file-vi-search-previous-cr.s)** |\n| **[vi_search_previous_line_beginning.s](Files/vi__search__previous__line__beginning_8s.md#file-vi-search-previous-line-beginning.s)** |\n| **[vi_set_ptr_last_char.s](Files/vi__set__ptr__last__char_8s.md#file-vi-set-ptr-last-char.s)** |\n| **[vi_set_xpos_0.s](Files/vi__set__xpos__0_8s.md#file-vi-set-xpos-0.s)** |\n| **[vi_set_xpos_from_A.s](Files/vi__set__xpos__from__A_8s.md#file-vi-set-xpos-from-a.s)** |\n| **[vi_shift_file_from_memory_one_char.s](Files/vi__shift__file__from__memory__one__char_8s.md#file-vi-shift-file-from-memory-one-char.s)** |\n| **[vi_shift_line_left_to_right_editor.s](Files/vi__shift__line__left__to__right__editor_8s.md#file-vi-shift-line-left-to-right-editor.s)** |\n| **[vi_strlen_current_line.s](Files/vi__strlen__current__line_8s.md#file-vi-strlen-current-line.s)** |\n| **[vi_vi_ptr_file_used_plus_plus.s](Files/vi__vi__ptr__file__used__plus__plus_8s.md#file-vi-vi-ptr-file-used-plus-plus.s)** |\n| **[vi_xpos_screen_plus_plus.s](Files/vi__xpos__screen__plus__plus_8s.md#file-vi-xpos-screen-plus-plus.s)** |\n| **[vi_xpos_screen_sub_sub.s](Files/vi__xpos__screen__sub__sub_8s.md#file-vi-xpos-screen-sub-sub.s)** |\n| **[vi_ypos_screen_plus_plus.s](Files/vi__ypos__screen__plus__plus_8s.md#file-vi-ypos-screen-plus-plus.s)** |\n| **[vi_ypos_screen_sub_sub.s](Files/vi__ypos__screen__sub__sub_8s.md#file-vi-ypos-screen-sub-sub.s)** |\n\n\n\n\n\n\n-------------------------------\n\nUpdated on 2022-12-15 at 14:20:17 +0100\n"
},
{
"alpha_fraction": 0.6666666865348816,
"alphanum_fraction": 0.6666666865348816,
"avg_line_length": 14,
"blob_id": "7bdfef801f2f657d78bdc07815430ddc8591e4b8",
"content_id": "dfc93a3dd9e2b93bb4b25e4bfec75835a5841596",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Markdown",
"length_bytes": 45,
"license_type": "no_license",
"max_line_length": 32,
"num_lines": 3,
"path": "/docs/developer_manual/hardware.md",
"repo_name": "orix-software/orix-software.github.io",
"src_encoding": "UTF-8",
"text": "# Hardware\n\n* [Twilighte Board](/hardware/).\n"
},
{
"alpha_fraction": 0.570135772228241,
"alphanum_fraction": 0.5927602052688599,
"avg_line_length": 17.808509826660156,
"blob_id": "af4c664d0d9882286e677254ad902c03ecce389a",
"content_id": "0ad51ccc645bc47cba04969c2ecabff2677d8557",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Markdown",
"length_bytes": 884,
"license_type": "no_license",
"max_line_length": 115,
"num_lines": 47,
"path": "/doxygen/doxybook_output/Files/vi__compute__video__adress_8s.md",
"repo_name": "orix-software/orix-software.github.io",
"src_encoding": "UTF-8",
"text": "---\ntitle: /mnt/c/Users/plifp/OneDrive/oric/projets/orix-software/vi/src/functions/subfunc/vi/vi_compute_video_adress.s\n\n---\n\n# /mnt/c/Users/plifp/OneDrive/oric/projets/orix-software/vi/src/functions/subfunc/vi/vi_compute_video_adress.s\n\n\n\n## Functions\n\n| | Name |\n| -------------- | -------------- |\n| | **[vi_compute_video_adress](Files/vi__compute__video__adress_8s.md#function-vi-compute-video-adress)**() |\n\n\n## Functions Documentation\n\n### function vi_compute_video_adress\n\n```cpp\nvi_compute_video_adress()\n```\n\n\n\n\n## Source code\n\n```cpp\n\n.proc vi_compute_video_adress\n ldy #vi_struct_data::ypos_screen\n lda (vi_struct),y\n tay\n lda TABLE_LOW_TEXT,y\n sta vi_ptr_screen\n lda TABLE_HIGH_TEXT,y\n sta vi_ptr_screen+1\n rts\n.endproc\n```\n\n\n-------------------------------\n\nUpdated on 2022-12-15 at 11:48:27 +0100\n"
},
{
"alpha_fraction": 0.5442359447479248,
"alphanum_fraction": 0.5576407313346863,
"avg_line_length": 14.954545021057129,
"blob_id": "e9d17a825961a8ef2522f36fa2906bae3b306596",
"content_id": "2d557108b54b33f3ea226b61bc0e01e24a099e78",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Markdown",
"length_bytes": 746,
"license_type": "no_license",
"max_line_length": 71,
"num_lines": 44,
"path": "/docs/developer_manual/orixsdk_macros/fread.md",
"repo_name": "orix-software/orix-software.github.io",
"src_encoding": "UTF-8",
"text": "# FREAD macro\r\n\r\n## description\r\n\r\nRead byte from file\r\n\r\n## Usage\r\n\r\nfread ptr, size, count, fp\r\n\r\nnote:\r\nptr may be : (ptr), address\r\nsize may be: (ptr), address\r\nfp may be : address, #value, {address,y}\r\n\r\n## Output\r\n\r\nA & X returns length in 16 bits\r\n\r\n## Example\r\n\r\n```ca65\r\n .include \"telestrat.inc\"\r\n .include \"../orix-sdk/macros/SDK_file.mac\"\r\n\r\n fopen (MAN_SAVE_MALLOC_PTR), O_RDONLY\r\n cpx #$FF\r\n bne next\r\n\r\n cmp #$FF\r\n bne next\r\n ; Not found\r\n rts\r\n\r\nnext:\r\n ; Save FP\r\n sta MAN_FP\r\n stx MAN_FP+1\r\n fread (myptr), 1080, 1, MAN_FP ; myptr is from a malloc for example\r\n fclose(MAN_FP)\r\n rts\r\n```\r\n\r\nSee [XFREAD](../../../kernel/primitives/xfread) kernel primitive.\r\n"
},
{
"alpha_fraction": 0.6996527910232544,
"alphanum_fraction": 0.7065972089767456,
"avg_line_length": 20.153846740722656,
"blob_id": "77621fc94e907e1fc3f68d99ec11cb2c94080bf3",
"content_id": "e8b6f09b39b5d3847903982e3878fa92a060a94a",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Markdown",
"length_bytes": 576,
"license_type": "no_license",
"max_line_length": 99,
"num_lines": 26,
"path": "/docs/developer_manual/orixsdk.md",
"repo_name": "orix-software/orix-software.github.io",
"src_encoding": "UTF-8",
"text": "# Orix SDK\r\n\r\nIt provides macro to help assembly coding for Orix :\r\n\r\nhttps://github.com/assinie/orix-sdk/\r\n\r\nThe main way to handle orix-sdk is to add it as gitmodules (for example in a \"dependencies folder\")\r\n\r\n## Install in a project\r\n\r\n```bash\r\nmkdir myasmproject\r\ncd myasm_project\r\nmkdir dependencies && cd dependencies\r\ngit clone https://github.com/assinie/orix-sdk.git\r\n\r\n```\r\n\r\n## Load Orix SDK\r\n\r\nyou just need to load macro file in your code (and telestrat.inc from cc65):\r\n\r\n```ca65\r\n.include \"telestrat.inc\"\r\n.include \"dependencies/orix-sdk/macros/SDK.mac\"\r\n```\r\n"
}
] | 352 |
sagacitysite/brian2_loihi
|
https://github.com/sagacitysite/brian2_loihi
|
6c82edce242a0cfe1a9f21aa475b9bcfa26cfeb4
|
9ac5842e71c5df5d67119ae4b2ec6b7ed0bb446b
|
8107b5f474b8627605604d3746e49f879be732d8
|
refs/heads/main
| 2023-09-05T08:57:59.297562 | 2021-12-06T08:11:56 | 2021-12-06T08:11:56 | 347,937,113 | 16 | 5 |
MIT
| 2021-03-15T11:08:54 | 2021-10-18T13:34:58 | 2021-12-06T08:11:56 |
Python
|
[
{
"alpha_fraction": 0.6384817957878113,
"alphanum_fraction": 0.6418639421463013,
"avg_line_length": 43.349998474121094,
"blob_id": "bec32b4ff19bcdd453505faf4894f1ea818babee",
"content_id": "bcc913e2436489c8fe8544137502a6363bef55c0",
"detected_licenses": [
"MIT"
],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 2661,
"license_type": "permissive",
"max_line_length": 110,
"num_lines": 60,
"path": "/loihi_spike_generator_group.py",
"repo_name": "sagacitysite/brian2_loihi",
"src_encoding": "UTF-8",
"text": "from brian2 import SpikeGeneratorGroup, second, ms\n\nclass LoihiSpikeGeneratorGroup(SpikeGeneratorGroup):\n \"\"\"\n The LoihiSpikeGeneratorGroup extends the SpikeGeneratorGroup class from Brian2.\n\n This class creates a spike generator which gets times as integer (without time\n units). In addition the schedule of the generator is updated to meet Loihi.\n The period parameter from the parent initialization method is complemented\n with a unit inside of the function (as the times parameter).\n\n Methods\n -------\n __init__(N, indices, times, period=0, order=0, sorted=False)\n Initializes the LoihiSpikeGeneratorGroup and the SpikeGeneratorGroup\n \"\"\"\n def __init__(self, N, indices, times, period=0, order=0, sorted=False, name='loihi_spikegeneratorgroup*'):\n \"\"\" Initializes the SpikeGeneratorGroupLoihi and the SpikeGeneratorGroup\n\n The init method adds time units to the times and period parameters.\n In addition the when property of the SpikeGeneratorGroup is changed to 'start'.\n All parameters are already part of the init method of the SpikeGeneratorGroup\n class and are just modified.\n\n Parameters\n ----------\n N : int\n The number of \"neurons\" in this group\n indices : array of integers\n The indices of the spiking cells\n times : list (int)\n The spike times for the cells given in ``indices``. Has to have the\n same length as ``indices`` and has to be integer (without time units)\n period : int, optional\n If this is specified, it will repeat spikes with this period. A\n period of 0 means not repeating spikes.\n order : int, optional\n The priority of of this group for operations occurring at the same time\n step and in the same scheduling slot. Defaults to 0.\n sorted : bool, optional\n Whether the given indices and times are already sorted. Set to ``True``\n if your events are already sorted (first by spike time, then by index),\n this can save significant time at construction if your arrays contain\n large numbers of spikes. Defaults to ``False``.\n name : str, optional\n A unique name for the object, otherwise will use\n ``loihi_spikegeneratorgroup_0'``, etc.\n \"\"\"\n\n # Define Brian spike generator group\n super().__init__(\n N,\n indices,\n times*ms,\n period=period*second,\n order=order,\n sorted=sorted,\n when='start', # Update schedule\n name=name\n )\n"
},
{
"alpha_fraction": 0.539922297000885,
"alphanum_fraction": 0.5611640810966492,
"avg_line_length": 41.08736038208008,
"blob_id": "033495dd0cc1cf233a7f79143fc99d62eee6d697",
"content_id": "0d63a094f37f754d3666b8af5af0d5ca56ee32b2",
"detected_licenses": [
"MIT"
],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 22644,
"license_type": "permissive",
"max_line_length": 163,
"num_lines": 538,
"path": "/loihi_synapses.py",
"repo_name": "sagacitysite/brian2_loihi",
"src_encoding": "UTF-8",
"text": "from brian2 import Synapses, ms\nimport re\nimport numpy as np\nfrom .parameter_checks import *\nfrom .constants import synapse_sign_mode\n\nclass LoihiSynapses(Synapses):\n \"\"\"\n The LoihiSynapses extends the Synapses class from Brian2.\n\n This class provides Loihi parameters for delay and pre- and post-synaptic\n traces. Note that all parameters are given just as a pure integer without\n a Brain2 time unit (like e.g. ms or seconds). If parameters for one of the\n traces are not given, the trace equations are not created for this trace.\n\n Methods\n -------\n __init__(source, target=None, parameters ...)\n Initializes the LoihiSynapses and the Synapses\n __str__()\n Prints object\n __getWeightPrecision()\n Gets precision of weights, which defined by the user via numWeightBits (or default value)\n __getWeightLimit()\n Gets the weight limit, which is a constant value\n __defineTraceEquation(name, imp, tau)\n Defines all trace equations (x1, x2, y1, y2, y3)\n __buildLearningRule(dw_raw)\n Build all equations for the learning rule\n __buildNoLearningRule()\n Build the update euqations for the actual weight\n calcActualWeights(weights)\n Calculates the actual weight, given a weight (=weight mantissa)\n \"\"\"\n\n @property\n def w_act(self):\n \"\"\" Property decorator to inclulde a getter for the actual weight w_act\n\n Returns\n -------\n int/list\n A single integer value or a list of weights\n \"\"\"\n if (self.w is None):\n raise Exception(\"Weight 'w' is not defined, therefore 'w_act' does not exist.\")\n return self.calcActualWeights(self.w)\n\n @w_act.setter\n def w_act(self, value):\n \"\"\" Property decorator to inclulde a setter for the actual weight w_act\n \"\"\"\n self._w_act = value\n\n @property\n def w(self):\n \"\"\" Property decorator to inclulde a getter for the weight mantissa w\n Returns\n -------\n int/list\n A single integer value or a list of weights\n \"\"\"\n return self._w\n\n @w.setter\n def w(self, weights):\n \"\"\" Property decorator to inclulde a setter for the weight w\n\n A setter for the connection weight that takes weights and checks\n if weights match Loihi values. If not, exceptions are raised.\n\n Parameters\n ----------\n source : SpikeSource\n The source of spikes, e.g. a NeuronGroup.\n\n Raises\n ------\n Exception\n If weights are not integer.\n Exception\n If weights is not in range of (-255...255) for mixed syanpses sign mode,\n (0...255) for excitatory and (-255...0) for inhibitory.\n \"\"\"\n\n # Make sure that weights are a numpy array\n weights = np.array(weights)\n\n # First check if all values are int\n if (weights.dtype not in [np.dtype('int8'), np.dtype('int16'), np.dtype('int32'), np.dtype('int64')]):\n raise Exception(\"Weights have to be integer values.\")\n\n # If sign mode is mixed, check weight range and round to precision\n if (self.sign_mode == synapse_sign_mode.MIXED and\n (np.any(weights < -256) or np.any(weights > 254))):\n raise Exception(\"Weights have to be between -256 and 254 in sign mode MIXED.\")\n\n # If sign mode is excitatory, check range\n if (self.sign_mode == synapse_sign_mode.EXCITATORY and\n (np.any(weights < 0) or np.any(weights > 255))):\n raise Exception(\"Weights have to be between 0 and 255 in sign mode EXCITATORY.\")\n\n # If sign mode is inhibitory, check range\n if (self.sign_mode == synapse_sign_mode.INHIBITORY and\n (np.any(weights < -256) or np.any(weights > 0))):\n raise Exception(\"Weights have to be between -256 and 0 in sign mode INHIBITORY.\")\n\n # Set actual weights once we have a valid weight mantissa\n self.w_act = self.calcActualWeights(weights)\n\n # Store weights\n self._w = weights\n\n # Not only set the attribute, but also the state variable for the equations\n self.set_states({'w': weights, 'w_act': self.calcActualWeights(weights)})\n\n def __init__(\n self, source, target=None, delay=0, dw='', w_exp=0,\n sign_mode=2, num_weight_bits=8,\n imp_x1=False, tau_x1=False, imp_x2=False, tau_x2=False,\n imp_y1=False, tau_y1=False, imp_y2=False, tau_y2=False, imp_y3=False, tau_y3=False,\n name='loihi_synapses*'\n ):\n \"\"\" Initializes the LoihiNetwork and the Network\n\n The init method checks all parameters and build the equations for those\n traces, where values were given as parameters. The euqations are then\n combined and used to initialise the parent Synapses class from Brian2.\n Note that the exact_clipped method is used to match the calculation\n of the traces on Loihi.\n\n The source and target parameters equal the parameters from the parent class.\n\n Parameters\n ----------\n source : SpikeSource\n The source of spikes, e.g. a NeuronGroup.\n target : Group, optional\n The target of the spikes, typically a NeuronGroup. If none is given, the same as source().\n delay: int, optional\n The synaptic delay.\n dw: str, optional\n Learning rule, using the pre- and post-synaptic traces. Also constant values are allowed.\n Note that only `*`, `-` and `+` is allowed.\n w_exp: int, optional\n Weight exponent which scales the weights by 2^(6 + w_exp).\n The weight exponent can be between -8 and 7.\n sign_mode: int, optional\n Defines if the synapses are mixed (1), excitatory (2) or inhibitory (3).\n Excitatory synapses are default.\n `synapse_sign_mode` can be used for defining the sign mode.\n num_weight_bits: int, optional\n Defines the precision of the weight, default is 8 bits.\n `num_weight_bits` is in a range between 0 and 8.\n imp_x1: int, optional\n The impulse of the first synaptic pre trace x1. The impulse is between 0 and 127.\n tau_x1: int, optional\n The time constant of the first synaptic pre trace x1. Tau has to be greater or equal to 0.\n imp_x2: int, optional\n The impulse of the first synaptic pre trace x2. The impulse is between 0 and 127.\n tau_x2: int, optional\n The time constant of the first synaptic pre trace x2. Tau has to be greater or equal to 0.\n imp_y1: int, optional\n The impulse of the first synaptic post trace y1. The impulse is between 0 and 127.\n tau_y1: int, optional\n The time constant of the first synaptic pre trace y1. Tau has to be greater or equal to 0.\n imp_y2: int, optional\n The impulse of the first synaptic post trace y2. The impulse is between 0 and 127.\n tau_y2: int, optional\n The time constant of the first synaptic pre trace y2. Tau has to be greater or equal to 0.\n imp_y3: int, optional\n The impulse of the first synaptic post trace y3. The impulse is between 0 and 127.\n tau_y3: int, optional\n The time constant of the first synaptic pre trace y3. Tau has to be greater or equal to 0.\n name : str, optional\n The name for this object. If none is given, a unique name of the form\n ``loihi_synapses``, ``loihi_synapses_1``, etc. will be automatically chosen.\n \"\"\"\n\n # Check and set synapses sign mode\n check_range_and_int(sign_mode, 'sign_mode', low=1, high=3)\n self.sign_mode = sign_mode\n\n # Check and set weight exponent\n check_range_and_int(w_exp, 'w_exp', low=-8, high=7)\n self.w_exp = w_exp\n\n # Check and set number of weight bits\n check_range_and_int(num_weight_bits, 'num_weight_bits', low=0, high=8)\n self.num_weight_bits = num_weight_bits\n\n # Check if impulse value is in a range of 0...62 and integer\n check_range_and_int(delay, 'delay', low=0, high=62)\n\n # Define weight equations\n #synaptic_input_update = '''I += w\\n'''\n synaptic_input_update = '''I += w_act\\n'''\n\n # check if a learning rule is given. If not build equations that only update w_act\n learning_rule = self.__buildNoLearningRule() if dw == '' else self.__buildLearningRule(dw)\n\n # Define trace equations\n x1_model, x1_pre = self.__defineTraceEquation('x1', imp_x1, tau_x1)\n x2_model, x2_pre = self.__defineTraceEquation('x2', imp_x2, tau_x2)\n y1_model, y1_post = self.__defineTraceEquation('y1', imp_y1, tau_y1)\n y2_model, y2_post = self.__defineTraceEquation('y2', imp_y2, tau_y2)\n y3_model, y3_post = self.__defineTraceEquation('y3', imp_y3, tau_y3)\n\n # Define parameters for printing\n self.loihi_parameters = {\n 'delay': delay,\n 'dw': dw,\n 'w_exp': w_exp,\n 'sign_mode': sign_mode,\n 'num_weight_bits': num_weight_bits,\n 'imp_x1': imp_x1,\n 'tau_x1': tau_x1,\n 'imp_x2': imp_x2,\n 'tau_x2': tau_x2,\n 'imp_y1': imp_y1,\n 'tau_y1': tau_y1,\n 'imp_y2': imp_y2,\n 'tau_y2': tau_y2,\n 'imp_y3': imp_y3,\n 'tau_y3': tau_y3,\n }\n\n # Define dependency factors\n x0_factor = ''\n if (imp_x1 and tau_x1) or (imp_x2 and tau_x2):\n x0_factor = '''x0 = 1\\n'''\n y0_factor = ''\n if (imp_y1 and tau_y1) or (imp_y2 and tau_y2) or (imp_y3 and tau_y3):\n y0_factor = '''y0 = 1\\n'''\n\n # Combine equations\n model = x1_model + x2_model + y1_model + y2_model + y3_model + learning_rule\n on_pre = synaptic_input_update + x0_factor + x1_pre + x2_pre\n on_post = y0_factor + y1_post + y2_post + y3_post\n\n # Create Brian synapses\n super(LoihiSynapses, self).__init__(\n source,\n target,\n model=model,\n on_pre=on_pre,\n on_post=on_post,\n delay=delay*ms,\n method='exact_synapse',\n name=name\n )\n\n def __str__(self):\n \"\"\"Creates a user friendly overview over all parameters\n\n This function makes it easy to get a transparent overview over all synapse parameters.\n \"\"\"\n print_string = 'Parameters of the synapses:\\n\\n'\n for key, value in self.loihi_parameters.items():\n print_string += '{:18} {:}\\n'.format(key, value)\n print_string += \"\\nFor getting the weights use the properties 'w' and 'w_act'.\\n\"\n\n return print_string\n\n def __getWeightPrecision(self):\n # Check if sign mode is mixed\n is_mixed = 1 if (self.sign_mode == synapse_sign_mode.MIXED) else 0\n\n # Define number of available bits\n num_lsb_bits = 8 - (self.num_weight_bits - is_mixed)\n\n # Calculate precision\n precision = 2**num_lsb_bits\n\n return precision\n\n def __getWeightLimit(self):\n # Define weight limit: 21 bits with last 6 bits zeros\n weight_limit = 2**21 - 2**6\n\n return weight_limit\n\n def __defineTraceEquation(self, name, imp, tau):\n \"\"\" Checks and defines trace equations from impulse and tau parameters\n\n This functions helps to define the trace equations for the LoihiSynapses.\n Every trace equation is optional. If parameters are empty, empty equations\n will be returned.\n\n Parameters\n ----------\n name : str\n The name of the trace (x1, x2, y1, etc.)\n imp : int\n The impulse of a synaptic pre or post trace\n tau : int\n The time constant of a synaptic pre or post trace\n\n Returns\n -------\n tuple\n a tuple containing the model equations and the on-pre/on-post equation\n each as a string\n \"\"\"\n model = ''\n on = ''\n\n if (imp and tau):\n p = { 'x': name, 'imp': imp, 'tau': tau }\n\n # Check if impulse value is in a range of 0...127 and integer\n check_range_and_int(imp, 'imp_'+name, low=0, high=127)\n # Check if tau value is in a range of 0...127 and integer\n check_lower_and_int(tau, 'tau_'+name, low=0)\n\n model = '''\n {x}_new = {x} * (1 - (1.0/{tau})) : 1\n {x}_int = int({x}_new) : 1\n {x}_frac = {x}_new - {x}_int : 1\n {x}_add_or_not = int({x}_frac > rand()) : 1 (constant over dt)\n {x}_rnd = {x}_int + {x}_add_or_not : 1\n d{x}/dt = {x}_rnd / ms : 1 (clock-driven)\n '''.format(**p)\n\n # third order coefficients\n #model = '''\n # {x}_new = {x} * (1 - (1.0/{tau}) + (1.0/{tau})**2 / 2 - (1.0/{tau})**3 / 6) : 1\n # {x}_int = int({x}_new) : 1\n # {x}_frac = {x}_new - {x}_int : 1\n # {x}_add_or_not = int({x}_new!={x}_int and 0.5 > rand()) : 1 (constant over dt)\n # {x}_rnd = {x}_int + {x}_add_or_not : 1\n # d{x}/dt = {x}_rnd / ms : 1 (clock-driven)\n #'''.format(**p)\n\n on = '''{x} = int(clip({x} + {imp}, 0, 127))\\n'''.format(**p)\n\n # Remove preceding spaces and tabs from model and return model and on as tuple\n return re.sub('(?<=\\\\n)[ \\t]*', '', model), on\n\n def __buildLearningRule(self, dw_raw):\n \"\"\" Takes a learning rule and returns Brian2 compatible equations\n\n First, the formula equation string is tested for several different problems.\n If problems are found, an exception is raised.\n Second, the equations for updating the weight and the actual weight are defined.\n\n Parameters\n ----------\n dw_raw : str\n The learning rule as a string, given from the user.\n\n Returns\n -------\n str\n The Brian2 equations to update the weight\n \"\"\"\n\n # Trim learning rule string: remove all tabs and whitespaces\n dw = re.sub('[ \\t]+', '', dw_raw)\n\n # First, check for division\n if (re.search('/+', dw) is not None):\n raise Exception(\"Division is not allowed.\")\n\n # Check if variables are used which are not support by this package in the current version\n # This is: r0, r1, d, t\n if (re.search('(r0)+', dw) is not None):\n raise Exception(\"The variable 'r0' is currently not supported by this package.\")\n if (re.search('(r1)+', dw) is not None):\n raise Exception(\"The variable 'r1' is currently not supported by this package.\")\n if (re.search('d+', dw) is not None):\n raise Exception(\"The variable 'd' is currently not supported by this package.\")\n if (re.search('t+', dw) is not None):\n raise Exception(\"The variable 't' is currently not supported by this package.\")\n\n # Check if any not-allowed variable is used\n # Allowed are x0, x1, x2, y0, y1, y2, y3, u, w, 0-9, +, -, *, ^, (, )\n match = re.findall('x0|x1|x2|y0|y1|y2|y3|u[0-9]+|w|sign\\(|[0-9]+|\\+|-|\\*|\\^|\\(|\\)', dw)\n # Construct string again from found parts and check if it can reproduce the learning rule string\n if (''.join(match) != dw):\n raise Exception(\"The learing rule contains some unsupported symbols. Allowed are: x0, x1, x2, y0, y1, y2, y3, u[0-9]+, w, sign, 0-9, +, -, *, ^, (, )\")\n\n # Check if any math symbol (+,-,*) is used in the end or if * is used in the beginning\n if (re.search('^\\*', dw) is not None):\n raise Exception(\"'*' is not allowed as the first symbol in the equation.\")\n if (re.search('[\\+\\*-]$', dw) is not None):\n raise Exception(\"'*', '+' and '-' is not allowed as the last symbol in the equation.\")\n\n # Check if every variable has a math symbol around it\n # Predefine a symbols to search for\n group = '(x0|x1|x2|y0|y1|y2|y3|u[0-9]+|w|sign\\([^)]*\\))'\n match_variables_all = re.findall(group, dw)\n match_variables_correct = re.findall('(?<=[+*(-])'+group+'(?=[+*)-])', '+'+dw+'+') # Small '+' hack to simplify regexp\n if (not np.array_equal(match_variables_all, match_variables_correct)):\n raise Exception(\"Some variables are not included correctly.\")\n\n # Check if all occuring 'u's are followed by a number\n if (len(re.findall('(u(?:\\D|$))', dw)) > 0):\n raise Exception(''u' must be followed by a number.')\n # The number is only allowed to start with 0, when the number has one digit\n match = re.findall('u[0-9]+.', dw+'.') # adding '.' in the end is a trick to also match a 'u' if it's in the end\n for m in match:\n if (re.search('u[1-9][0-9]+|u[0-9][^0-9]', m) is None):\n raise Exception(\"If 'u' is followed by a number with more than one digit, it cannot start with a '0'.\")\n\n # In this emulator, 'u' is only supported up to 'u9'\n if (re.search('u[0-9][0-9]+', dw) is not None):\n raise Exception(\"'u' is currently only supported between u0 and u9.\")\n\n # Check if ^ is prepended by 2 and is followed by a number or +/- and a number\n # The number has to be between -7 and 9, calculations (+/-/*) are not allowed in the exponent\n # Remove matches from string, if a ^ is remaining, it is malformed\n match = re.sub('2\\^\\+?[0-9]|2\\^-?[0-7]', '', dw)\n if (re.search('\\^', match) is not None):\n raise Exception(\"There is a malformed '^' in the equation.\")\n\n # Find terms in the equations and check if every term has an event variable (so called dependency factor)\n match = re.sub('(?<=\\([^)])([+-])(?=.*?\\))|(?<=2\\^)(\\+*?)(?=[0-9])|(?<=2\\^)(\\-*?)(?=[0-7])', '*', dw)\n for m in re.split('[\\+-]', match):\n if (re.search('(x0|y0|u[0-9]+)', m) is None and m != ''):\n raise Exception(\"There is at least one term in the equation that does not contain a dependency factor (x0, y0 or u[0-9]).\")\n\n # Get limits for weight mantissa, depending on sign mode of weight\n # Default to mixed sign mode\n w_low = -256\n w_high = 254\n # Adapt if excitatory\n if (self.sign_mode == synapse_sign_mode.EXCITATORY):\n w_low = 0\n w_high = 255\n # Adapt if inhibitory\n if (self.sign_mode == synapse_sign_mode.INHIBITORY):\n w_low = -256\n w_high = 0\n\n # Define variables for equation\n p = {\n 'dw': dw,\n 'precision': self.__getWeightPrecision(),\n 'is_mixed': int(self.sign_mode == synapse_sign_mode.MIXED),\n 'w_exp': self.w_exp,\n 'limit': self.__getWeightLimit(),\n 'w_low': w_low,\n 'w_high': w_high\n }\n\n learning_equations = '''\n u0 = 1 : 1\n u1 = int(t/ms % 2**1 == 0) : 1\n u2 = int(t/ms % 2**2 == 0) : 1\n u3 = int(t/ms % 2**3 == 0) : 1\n u4 = int(t/ms % 2**4 == 0) : 1\n u5 = int(t/ms % 2**5 == 0) : 1\n u6 = int(t/ms % 2**6 == 0) : 1\n u7 = int(t/ms % 2**7 == 0) : 1\n u8 = int(t/ms % 2**8 == 0) : 1\n u9 = int(t/ms % 2**9 == 0) : 1\n\n dw_rounded = int(sign({dw})*ceil(abs({dw}))) : 1\n quotient = int(dw_rounded / {precision}) : 1\n remainder = abs(dw_rounded) % {precision} : 1\n prob = remainder / {precision} : 1\n add_or_not = sign(dw_rounded) * int(prob > rand()) : 1 (constant over dt)\n dw_rounded_to_precision = (quotient + add_or_not) * {precision} : 1\n w_updated = w + dw_rounded_to_precision : 1\n w_clipped = clip(w_updated, {w_low}, {w_high}) : 1\n dw/dt = w_clipped / ms : 1 (clock-driven)\n\n w_act_scaled = w_clipped * 2**(6 + {w_exp}) : 1\n w_act_scaled_shifted = int(floor(w_act_scaled / 2**6)) * 2**6 : 1\n w_act_clipped = clip(w_act_scaled_shifted, -{limit}, {limit}) : 1\n dw_act/dt = w_act_clipped / ms : 1 (clock-driven)\n\n dx0/dt = 0 / ms : 1 (clock-driven)\n dy0/dt = 0 / ms : 1 (clock-driven)\n '''.format(**p)\n\n # Replace ^ by ** since python/sympy needs ** as potential symbol, but ^ is the Loihi way doing it\n learning_equations = learning_equations.replace('^', '**')\n\n # Remove preceding spaces and tabs and return\n r = re.sub('(?<=\\\\n)[ \\t]*', '', learning_equations)\n return r\n\n def __buildNoLearningRule(self):\n \"\"\"\n Builds all necessary update equations for the case that no learning rule is used.\n\n Returns\n -------\n str\n The Brian2 equations to update the weight\n \"\"\"\n\n # Define variables for equation\n p = {\n 'precision': self.__getWeightPrecision(),\n 'w_exp': self.w_exp,\n 'limit': self.__getWeightLimit()\n }\n\n # why compute this at each step? !!!!\n weight_equations = '''\n w : 1\n w_shifted = int(w / {precision}) * {precision} : 1\n w_scaled = w_shifted * 2**(6 + {w_exp}) : 1\n w_scaled_shifted = int(floor(w_scaled / 2**6)) * 2**6 : 1\n w_clipped = clip(w_scaled_shifted, -{limit}, {limit}) : 1\n dw_act/dt = w_clipped / ms : 1 (clock-driven)\n '''.format(**p)\n\n # Remove preceding spaces and tabs and return\n return re.sub('(?<=\\\\n)[ \\t]*', '', weight_equations)\n\n def calcActualWeights(self, weights):\n # Get weights (actually weight mantissa!)\n #weights = self.w\n\n # Define number of available bits\n precision = self.__getWeightPrecision()\n\n # Shift weight by number of availbale bits\n w_shifted = (weights / precision).astype(int) * precision\n\n # Scale weight with weight exponent\n w_scaled = w_shifted * 2**(6.0 + self.w_exp)\n\n # Shift scaled values by 6 bits back and forth\n w_scaled_shifted = (np.floor(w_scaled / 2**6)).astype(int) * 2**6\n\n # Clip to 21 bits with last 6 bits zeros\n # Note: We cannot clip the value before shifting it.\n # The 2**6 shifting trick makes problems with flooring the value.\n limit = self.__getWeightLimit()\n w_act = np.clip(w_scaled_shifted, -limit, limit)\n\n # Return actual weight\n return w_act\n\n"
},
{
"alpha_fraction": 0.5349397659301758,
"alphanum_fraction": 0.5662650465965271,
"avg_line_length": 18.761905670166016,
"blob_id": "357abfb03cf0f01c96e606ffa981151a4da82ea4",
"content_id": "d80f31216b68b8d2e3b003e680fbc70ae78f8940",
"detected_licenses": [
"MIT"
],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 415,
"license_type": "permissive",
"max_line_length": 39,
"num_lines": 21,
"path": "/constants.py",
"repo_name": "sagacitysite/brian2_loihi",
"src_encoding": "UTF-8",
"text": "from types import SimpleNamespace\n\n# Define states for state monitors\nstate = SimpleNamespace(**{\n 'VOLTAGE': 'v',\n 'CURRENT': 'I',\n 'X1TRACE': 'x1',\n 'X2TRACE': 'x2',\n 'Y1TRACE': 'y1',\n 'Y2TRACE': 'y2',\n 'Y3TRACE': 'y3',\n 'WEIGHT': 'w',\n 'ACTUAL_WEIGHT': 'w_act'\n})\n\n# Synapse sign mode\nsynapse_sign_mode = SimpleNamespace(**{\n 'MIXED': 1,\n 'EXCITATORY': 2,\n 'INHIBITORY': 3\n})\n"
},
{
"alpha_fraction": 0.5496487021446228,
"alphanum_fraction": 0.5772833824157715,
"avg_line_length": 36.7876091003418,
"blob_id": "042cd7ad2e349055c8def857876f620e754f084e",
"content_id": "e4cefafbc33eeab9be4970597560cf65a0839932",
"detected_licenses": [
"MIT"
],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 4270,
"license_type": "permissive",
"max_line_length": 126,
"num_lines": 113,
"path": "/loihi_neuron_group.py",
"repo_name": "sagacitysite/brian2_loihi",
"src_encoding": "UTF-8",
"text": "from brian2 import NeuronGroup, ms\nimport re\nfrom .parameter_checks import *\n\nclass LoihiNeuronGroup(NeuronGroup):\n \"\"\"\n The LoihiNeuronGroup extends the NeuronGroup class from Brian2.\n\n This class defines the Loihi neuron model as differential equations,\n which are then defined in Brian2. Finally the Brian2 NeuronGroup is\n initialized with this neuron model.\n\n Methods\n -------\n __init__(N, refractory=1, threshold_v_mant=100, decay_v=0, decay_I=4096)\n Initializes the LoihiNeuronGroup and the NeuronGroup\n \"\"\"\n\n def __init__(self, N, refractory=1, threshold_v_mant=100, decay_v=0, decay_I=4096, name='loihi_neurongroup*', eqn_str=''):\n \"\"\" Initializes the LoihiStateMonitor and the StateMonitor\n\n The init method checks if the given parameters are valid. Afterwards the\n equations for the neuron model are defined. Finally, the Brian2 NeuronGroup\n is initialized. Parameters are given as integers, not time units\n (seconds, ms, etc.) should be used. They are added by this method automatically.\n As integration method, forward euler is used.\n\n Parameters\n ----------\n N : int\n Number of neurons in the group.\n refractory: int (1...64), optional\n The refactory period of the neuron.\n threshold_v_mant: int (0...131071), optional\n The mantissa of the membrane voltage threshold.\n decay_v : int (0...4096), optional\n The membrane voltage decay (note that tau_v = 4096/decay_v)\n decay_I : int (0...4096), optional\n The current decay (note that tau_I = 4096/decay_I)\n name : str, optional\n A unique name for the group, otherwise use ``loihi_neurongroup_0``, etc.\n eqn_str : str, optional\n A str to be added to the neuron model equations, e.g. to define additional variables\n \"\"\"\n\n # Check if tau values are in a range of 0...4096 and integer\n check_range_and_int(decay_v, 'decay_v', low=0, high=4096)\n check_range_and_int(decay_I, 'decay_I', low=0, high=4096)\n\n # Check if refactory period is in a range of 1...64 and integer\n check_range_and_int(refractory, 'refractory', low=1, high=64)\n\n # Check if threshold_v_mant is in a range of 0...131071 and integer\n check_range_and_int(threshold_v_mant, 'threshold_v_mant', low=0, high=131071)\n\n # Define parameters for equation\n # Note: tau is inversed to avoid division by zero\n p = {\n '1_tau_v': decay_v/2**12, # 1/tau_v\n '1_tau_I': decay_I/2**12, # 1/tau_I\n 'v_th': threshold_v_mant * 2**6\n }\n\n # Define parameters for printing\n self.loihi_parameters = {\n **p,\n 'decay_v': decay_v,\n 'decay_I': decay_I,\n 'tau_v': 2**12/decay_v if decay_v != 0 else 'inf',\n 'tau_I': 2**12/decay_I if decay_I != 0 else 'inf',\n 'refractory': refractory,\n 'threshold_v_mant': threshold_v_mant,\n 'reset_v': 0\n }\n\n # Neuron model\n equations_LIF = '''\n rnd_v = int(sign(v)*ceil(abs(v*{1_tau_v}))) : 1\n rnd_I = int(sign(I)*ceil(abs(I*{1_tau_I}))) : 1\n dv/dt = -rnd_v/ms + I/ms: 1 (unless refractory)\n dI/dt = -rnd_I/ms: 1\n '''.format(**p)\n\n # Add equation string\n equations_LIF += eqn_str\n \n # Create Brian neuron group\n\n super().__init__(\n N,\n re.sub('(?<=\\\\n)[ \\t]*', '', equations_LIF),\n threshold='v > {v_th}'.format(**p),\n reset='v = 0',\n refractory=refractory*ms,\n method='forward_euler',\n name=name\n )\n\n # Set initial voltage\n self.v = 0\n\n def __str__(self):\n \"\"\"Creates a user friendly overview over all parameters\n\n This function makes it easy to get a transparent overview over all neuron group parameters.\n Call: print(LoihiNeuronGroup.__str__())\n \"\"\"\n\n print_string = 'Parameters of the neuron group:\\n\\n'\n for key, value in self.loihi_parameters.items():\n print_string += '{:18} {:}\\n'.format(key, value)\n\n return print_string\n"
},
{
"alpha_fraction": 0.5813277959823608,
"alphanum_fraction": 0.5854771733283997,
"avg_line_length": 23.845361709594727,
"blob_id": "dbb8a22133b3e36b6af1ff3cff241e8b7af580a2",
"content_id": "00cd1c78346a03bb8c735e470006ff09a0c3c54a",
"detected_licenses": [
"MIT"
],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 2410,
"license_type": "permissive",
"max_line_length": 93,
"num_lines": 97,
"path": "/parameter_checks.py",
"repo_name": "sagacitysite/brian2_loihi",
"src_encoding": "UTF-8",
"text": "def check_range_and_int(val, name, low=0, high=127):\n \"\"\"Checks a parameter to match Loihi\n\n Calls two methods to check if the parameter value is\n integer and in a range between low and high.\n\n Parameters\n ----------\n val : int\n The value of the parameter\n name : str\n The name of the parameter\n low : int, optional\n The lower bound of the parameter\n high : int, optional\n The upper bound of the parameter\n \"\"\"\n check_int(val, name)\n check_range(val, name, low, high)\n\ndef check_lower_and_int(val, name, low=0):\n \"\"\"Checks a parameter to match Loihi\n\n Calls two methods to check if the parameter value is\n integer and in greater than low.\n\n Parameters\n ----------\n val : int\n The value of the parameter\n name : str\n The name of the parameter\n low : int, optional\n The lower bound of the parameter\n \"\"\"\n check_int(val, name)\n check_lower(val, name, low)\n\ndef check_lower(val, name, low=0):\n \"\"\"Checks if a parameter is greater or equal than low\n\n Parameters\n ----------\n val : int\n The value of the parameter\n name : str\n The name of the parameter\n low : int, optional\n The lower bound of the parameter\n\n Raises\n ------\n Exception\n If value is lower than low.\n \"\"\"\n if (val < low):\n raise Exception(str(name) + \" has to be greater or equal to \" +str(low)+ \".\")\n\ndef check_range(val, name, low=0, high=127):\n \"\"\"Checks if a parameter is between low and high\n\n Parameters\n ----------\n val : int\n The value of the parameter\n name : str\n The name of the parameter\n low : int, optional\n The lower bound of the parameter\n high : int, optional\n The upper bound of the parameter\n\n Raises\n ------\n Exception\n If value is lower than low of greater than high.\n \"\"\"\n if (val < low) or (val > high):\n raise Exception(str(name) + \" has to be between \" +str(low)+ \" and \" +str(high)+ \".\")\n\ndef check_int(val, name):\n \"\"\"Checks if a parameter is of type integer\n\n Parameters\n ----------\n val : int\n The value of the parameter\n name : str\n The name of the parameter\n\n Raises\n ------\n Exception\n If value not integer\n \"\"\"\n if not isinstance(val, int):\n raise Exception(str(name) + \" has to be an integer.\")\n"
},
{
"alpha_fraction": 0.5983231663703918,
"alphanum_fraction": 0.6048018336296082,
"avg_line_length": 38.17910385131836,
"blob_id": "0c234b17e702c45e0363b3a2e7fd38248f4f9a7d",
"content_id": "662088e9ac78326b83c1a5c4e59c3642f37488f4",
"detected_licenses": [
"MIT"
],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 2624,
"license_type": "permissive",
"max_line_length": 176,
"num_lines": 67,
"path": "/loihi_state_monitor.py",
"repo_name": "sagacitysite/brian2_loihi",
"src_encoding": "UTF-8",
"text": "from brian2 import StateMonitor\n\nclass LoihiStateMonitor(StateMonitor):\n \"\"\"\n The LoihiStateMonitor extends the StateMonitor class from Brian2.\n\n This class creates a Brian2 state monitor and updates the schedule for\n reading the monitored values. This schedule change is performed to\n produce the same outputs as in Loihi.\n\n Methods\n -------\n __init__(source, variable, record=True, order=0)\n Initializes the LoihiStateMonitor and the StateMonitor\n \"\"\"\n\n def __init__(self, source, variable, record=True, order=0, name='loihi_statemonitor*'):\n \"\"\" Initializes the LoihiStateMonitor and the StateMonitor\n\n First, a StateMonitor is initialized, based on the given parameters.\n Afterwards, the schedule for monitoring the values is updated. For\n parameters relating to the neuron, the 'end' has to be chosen. And\n for those telating to the synapse, the 'synapses' has to be chosen.\n\n Parameters\n ----------\n source : `Group`\n Which object to record values from.\n variable : str\n Which variable to record, check the `state` object for details.\n record : bool, sequence of ints\n Which indices to record, nothing is recorded for ``False``,\n everything is recorded for ``True`` (warning: may use a great deal of\n memory), or a specified subset of indices.\n order : int, optional\n The priority of of this group for operations occurring at the same time\n step and in the same scheduling slot. Defaults to 0.\n name : str, optional\n A unique name for the object, otherwise will use\n ``source.name+'loihi_statemonitor_0'``, etc.\n \"\"\"\n\n # Check if only one varibable is given\n if isinstance(variable, list):\n raise Exception('In the Loihi emulator, you can only define one varible in every state monitor. If you need to probe more variables, create another state monitor.')\n\n # Define Brian state monitor\n super().__init__(\n source,\n variable,\n record=record,\n order=order,\n name=name\n )\n\n # Update when states should be monitored\n if (variable in ['v', 'w', 'w_act']):\n self.when = 'end'\n #if (variable in ['I', 'x1', 'x2', 'y1', 'y2', 'y3', 'w', 'w_act']):\n if (variable in ['I', 'x1', 'x2', 'y1', 'y2', 'y3']):\n self.when = 'synapses'\n #@property\n #def t(self):\n \"\"\"\n Returns t as ints without ms\n \"\"\"\n # return self._t/ms"
},
{
"alpha_fraction": 0.6956287026405334,
"alphanum_fraction": 0.723475456237793,
"avg_line_length": 44.41666793823242,
"blob_id": "1fe2fe9a7c29105db0da6525bb25ffa4f411d7e8",
"content_id": "9a7dc1b38aa5111c249d141217244b4988b445c0",
"detected_licenses": [
"MIT"
],
"is_generated": false,
"is_vendor": false,
"language": "Markdown",
"length_bytes": 9267,
"license_type": "permissive",
"max_line_length": 293,
"num_lines": 204,
"path": "/README.md",
"repo_name": "sagacitysite/brian2_loihi",
"src_encoding": "UTF-8",
"text": "# A Loihi emulator based on Brian2\n\nThe package extends *Brian2* classes such that they match Loihi simulations. While the neuron and synapse model results in an exact match to *Loihi*, the pre- and post-synaptic traces have very small variations from the *Loihi* chip due to stochastic rounding.\n\nFurther details are explained in a paper coming soon.\n\n## Installation\n\n```\npip install brian2-loihi\n```\n\n### Requirements and dependencies\n\n**Python 3.6** or higher is required.\n\nDependencies are automatically installed by the pip package manager.\n\nIf the source code is used directly, the following packages need to be installed:\n\n* [Brian2 (2.4.2 or higher)](https://brian2.readthedocs.io/en/stable/)\n* [Numpy](https://numpy.org/)\n\n## Usage\n\nFive *Brian2* classes are extended. Available parameters are reported below. Further down you find example code.\n\n**Note**:\n\n* It is important to use a `LoihiNetwork`. The *Brian2* magic network approach is not supported.\n* Don't change the `defaultclock.dt` value. It is set to match *Loihi*.\n* Don't reorder the network simulation `schedule`.\n\nFirst import the package as:\n\n```\nfrom brian2_loihi import *\n```\n\nThe following classes can be used:\n\n### LoihiNetwork\n\nExtends the `Network` class from *Brian2* and supports the same parameters.\n\n### LoihiNeuronGroup\n\nExtends the `NeuronGroup` class from *Brian2* and supports the following parameters:\n\n* **N** (int): Number of neurons in the group.\n* **refractory** (int, 1...64, optional): The refactory period of the neuron.\n* **threshold_v_mant** (int, 0...131071, optional): The mantissa of the membrane voltage threshold.\n* **decay_v** (int, 0...4096, optional): The membrane voltage decay (note that tau_v = 4096/decay_v)\n* **decay_I** (int, 0...4096, optional): The current decay (note that tau_I = 4096/decay_I)\n* **name** (str, optional): A unique name for the group, otherwise use `loihi_neurongroup_0`, etc.\n\n### LoihiSynapses\n\nExtends the `Synapses` class from *Brian2* and supports the following parameters:\n\n* **source** (`SpikeSource`): The source of spikes, e.g. a NeuronGroup.\n* **target** (`Group`, optional): The target of the spikes, typically a NeuronGroup. If none is given, the same as source().\n* **delay** (int, optional): The synaptic delay.\n* **dw** (str, optional): Learning rule, using the pre- and post-synaptic traces. Also constant values are allowed. Note that only `*`, `-` and `+` is allowed.\n* **w_exp** (int, optional): Weight exponent which scales the weights by 2^(6 + w_exp). The weight exponent can be between -8 and 7.\n* **sign_mode** (int, optional): Defines if the synapses are mixed (1), excitatory (2) or inhibitory (3). Excitatory synapses are default. `synapse_sign_mode` can be used for defining the sign mode.\n* **num_weight_bits** (int, optional): Defines the precision of the weight, default is 8 bits. `num_weight_bits` is in a range between 0 and 8.\n* **imp_x1** (int, optional): The impulse of the first synaptic pre trace x1. The impulse is between 0 and 127.\n* **tau_x1** (int, optional): The time constant of the first synaptic pre trace x1. Tau has to be greater or equal to 0.\n* **imp_x2** (int, optional): The impulse of the first synaptic pre trace x2. The impulse is between 0 and 127.\n* **tau_x2** (int, optional): The time constant of the first synaptic pre trace x2. Tau has to be greater or equal to 0.\n* **imp_y1** (int, optional): The impulse of the first synaptic post trace y1. The impulse is between 0 and 127.\n* **tau_y1** (int, optional): The time constant of the first synaptic pre trace y1. Tau has to be greater or equal to 0.\n* **imp_y2** (int, optional): The impulse of the first synaptic post trace y2. The impulse is between 0 and 127.\n* **tau_y2** (int, optional): The time constant of the first synaptic pre trace y2. Tau has to be greater or equal to 0.\n* **imp_y3** (int, optional): The impulse of the first synaptic post trace y3. The impulse is between 0 and 127.\n* **tau_y3** (int, optional): The time constant of the first synaptic pre trace y3. Tau has to be greater or equal to 0.\n* **name** (str, optional): The name for this object. If none is given, a unique name of the form. `loihi_synapses`, `loihi_synapses_1`, etc. will be automatically chosen.\n\n### LoihiStateMonitor\n\nExtends the `StateMonitor` class from *Brian2* and supports the following parameters:\n\n* **source** (`Group`): Which object to record values from.\n* **variable** (str): Which variables to record, check the `state` object for details.\n* **record** (bool, sequence of ints): Which indices to record, nothing is recorded for ``False``, everything is recorded for ``True`` (warning: may use a great deal of memory), or a specified subset of indices.\n* **order** (int, optional): The priority of of this group for operations occurring at the same time step and in the same scheduling slot. Defaults to 0.\n* **name** (str, optional): A unique name for the object, otherwise will use `source.name+'loihi_statemonitor_0'`, etc.\n\n### LoihiSpikeMonitor\n\nExtends the `SpikeMonitor` class from *Brian2* and supports the following parameters:\n\n* **source** (`Group`): Which object to record values from.\n* **variable** (str, optional): Which variables to record at the time of the spike (in addition to the index of the neuron). Can be the name of a variable or a list of names\n* **record** (bool, sequence of ints, optional): Which indices to record, nothing is recorded for ``False``, everything is recorded for ``True`` (warning: may use a great deal of memory), or a specified subset of indices.\n* **order** (int, optional): The priority of of this group for operations occurring at the same time step and in the same scheduling slot. Defaults to 0.\n* **name** (str, optional): A unique name for the object, otherwise will use `source.name+'_loihi_spikemonitor_0'`, etc.\n\n### LoihiSpikeGeneratorGroup\n\nExtends the `SpikeGeneratorGroup` class from *Brian2* and supports the following parameters:\n\n* **N** (int): The number of \"neurons\" in this group\n* **indices** (array of integers): The indices of the spiking cells\n* **times** (list (int)): The spike times for the cells given in ``indices``. Has to have the same length as ``indices`` and has to be integer (without time units)\n* **period** (int, optional): If this is specified, it will repeat spikes with this period. A period of 0 means not repeating spikes.\n* **order** (int, optional): The priority of of this group for operations occurring at the same time step and in the same scheduling slot. Defaults to 0.\n* **sorted** (bool, optional): Whether the given indices and times are already sorted. Set to ``True`` if your events are already sorted (first by spike time, then by index), this can save significant time at construction if your arrays contain large numbers of spikes. Defaults to ``False``.\n* **name** (str, optional): A unique name for the object, otherwise will use `loihi_spikegeneratorgroup_0'`, etc.\n\n## Example\n\nMore examples and further details are provided in this repository:\n\nhttps://github.com/sagacitysite/brian2_loihi_utils\n\nHere we just provide a simple example.\n\n### Single neuron\n\n```\nimport matplotlib.pyplot as plt\nfrom brian2_loihi import *\n\n# Define a single neuron\nloihi_group = LoihiNeuronGroup(\n 1,\n refractory=2,\n threshold_v_mant=400,\n decay_v=1024,\n decay_I=1024\n)\n\n# Excitatory input spikes\nex_neuron_indices = [0, 0, 0, 0]\nex_spike_times = [12, 14, 40, 80]\n\n# Inhibitory input spikes\nin_neuron_indices = [0, 0, 0]\nin_spike_times = [50, 60, 90]\n\n# Define spike generators\ngenerator_ex = LoihiSpikeGeneratorGroup(1, ex_neuron_indices, ex_spike_times)\ngenerator_in = LoihiSpikeGeneratorGroup(1, in_neuron_indices, in_spike_times)\n\n# Connect excitatory generator with neuron\nsyn_ex = LoihiSynapses(generator_ex, loihi_group, sign_mode=synapse_sign_mode.EXCITATORY)\nsyn_ex.connect()\nsyn_ex.w = 124\n\n# Connect inhibitory generator with neuron\nsyn_in = LoihiSynapses(generator_in, loihi_group, sign_mode=synapse_sign_mode.INHIBITORY)\nsyn_in.connect()\nsyn_in.w = -124\n\n# Probe synaptic input using a state monitor\nmon_I = LoihiStateMonitor(loihi_group, 'I')\n# Probe voltage using a state monitor\nmon_v = LoihiStateMonitor(loihi_group, 'v')\n\n# NOTE: It is important to use the LoihiNetwork,\n# using Brian's magic network is not provided\nnet = LoihiNetwork(\n loihi_group,\n generator_in,\n generator_ex,\n syn_ex,\n syn_in,\n mon_I,\n mon_v\n)\n\n# Run the simulation\nnet.run(100, report='text')\n\n# Plot synaptic input (current)\nplt.plot(mon_I.I[0])\nplt.title('Synaptic input / Current')\npl = plt.show()\n\n# Plot voltage\nplt.plot(mon_v.v[0])\nplt.title('Voltage')\npl = plt.show()\n```\n\n## References\n\n### Emulator\n\nThe emulator is described in\n\n... coming soon ...\n\n### Loihi\n\nThe Loihi chip was developed by Intel and is introduced in\n\n[M. Davies et al., \"Loihi: A Neuromorphic Manycore Processor with On-Chip Learning,\" in IEEE Micro, vol. 38, no. 1, pp. 82-99, January/February 2018, doi: 10.1109/MM.2018.112130359.](https://doi.org/10.1109/MM.2018.112130359)\n\nSome further details are given in\n\n[C. Lin et al., \"Programming Spiking Neural Networks on Intel’s Loihi,\" in Computer, vol. 51, no. 3, pp. 52-61, March 2018, doi: 10.1109/MC.2018.157113521.](https://doi.org/10.1109/MC.2018.157113521)\n"
},
{
"alpha_fraction": 0.8080454468727112,
"alphanum_fraction": 0.8176650404930115,
"avg_line_length": 39.83928680419922,
"blob_id": "12f23955a9f46e660726fe39d90df2fef08c2f71",
"content_id": "8c4f3a9c52c38d45194cc3f5e93869baa1941de3",
"detected_licenses": [
"MIT"
],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 2287,
"license_type": "permissive",
"max_line_length": 115,
"num_lines": 56,
"path": "/__init__.py",
"repo_name": "sagacitysite/brian2_loihi",
"src_encoding": "UTF-8",
"text": "\"\"\"Loihi emulator based on Brian2\n\nThis package allows an emulation of the Loihi chip in Brian2.\n\nNot all features of Loihi are available in this version. In the following you find a\ndescription of the provided precision and available parameters.\n\nThe package provides the following precision:\n\n* The neuron and synapse model result in a perfect match with Loihi\n* The learning rule matches Loihi beside small fluctuations due to stochastic rounding\n\nThe following parameters can be chosen\n\n* Neuron model: threshold, refactory period, current decay, voltage decay\n* Synapse model: delay, weights, traces x1, x2, y1, y2, y3\n\nThe module mainly extends Brian2 classes:\n* The LoihiNetwork extends the Brian2 Network class\n* The LoihiNeuronGroup extends the Brian2 NeuronGroup class\n* The LoihiSynapses extends the Brian2 Synapses class\n* The LoihiStateMonitor extends the Brian2 StateMonitor class\n* The LoihiSpikeMonitor extends the Brian2 SpikeMonitor class\n* The LoihiSpikeGeneratorGroup extends the Brian2 SpikeGeneratorGroup class\n\nNote that the attributes of the extended classes deviate in most cases from\nthe attributes available in the original Brian2 classes.\n\"\"\"\n\n\"\"\"\nTODO Start\n\"\"\"\n# print(LoihiSynapses), print(LoihiNeuronGroup)\n# Parameter check: warning instead of exception?\n# Parameter names according to NxSDK? Pro: consistency, Con: Nobody knows the NxSDK docs\n# setter and getter for parameters using python properties? (see https://www.python-kurs.eu/python3_properties.php)\n# Create documenation?\n\"\"\"\nTODO End\n\"\"\"\n\n# Import all necessary modules\n#from brian2_loihi.constants import state, synapse_sign_mode\n#from brian2_loihi.loihi_network import LoihiNetwork\n#from brian2_loihi.loihi_neuron_group import LoihiNeuronGroup\n#from brian2_loihi.loihi_synapses import LoihiSynapses\n#from brian2_loihi.loihi_state_monitor import LoihiStateMonitor\n#from brian2_loihi.loihi_spike_generator_group import LoihiSpikeGeneratorGroup\n\nfrom .constants import state, synapse_sign_mode\nfrom .loihi_network import LoihiNetwork\nfrom .loihi_neuron_group import LoihiNeuronGroup\nfrom .loihi_synapses import LoihiSynapses\nfrom .loihi_state_monitor import LoihiStateMonitor\nfrom .loihi_spike_monitor import LoihiSpikeMonitor\nfrom .loihi_spike_generator_group import LoihiSpikeGeneratorGroup\n"
},
{
"alpha_fraction": 0.6359712481498718,
"alphanum_fraction": 0.6388489007949829,
"avg_line_length": 41.989688873291016,
"blob_id": "fa56cf64f7549fab0a86d8a2cc498b5831f2bd94",
"content_id": "caf0fada7d3792bba3513627bf3b45d08f6c08ff",
"detected_licenses": [
"MIT"
],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 4170,
"license_type": "permissive",
"max_line_length": 117,
"num_lines": 97,
"path": "/loihi_network.py",
"repo_name": "sagacitysite/brian2_loihi",
"src_encoding": "UTF-8",
"text": "from brian2 import Network, ExplicitStateUpdater, StateUpdateMethod, defaultclock, ms\nimport warnings\n\nclass LoihiNetwork(Network):\n \"\"\"\n The LoihiNetwork extends the Network class from Brian2.\n\n Note that it is important to use the LoihiNetwork class and not the magic network.\n\n Methods\n -------\n __init__(*objs, **kwds)\n Initializes the LoihiNetwork and the Network\n run(duration, **kwargs)\n Checks for problems and runs the Brian network simulation\n \"\"\"\n\n def __init__(self, *objs, **kwds):\n \"\"\" Initializes the LoihiNetwork and the Network\n\n This method registers two ExplicitStateUpdater as StateUpdateMethod. These update\n methods are used by Loihi to integrate diffeential equations. Further, the dt is\n set to 1, again to match a Loihi simulation. Afterwards the __init__() method from\n the Brian2 Network is called, initializing a default Brian2 network. Finally, the\n default schedule from Brian2 is reordered to match Loihi. All arguments are passed\n to the parent init method.\n\n Parameters\n ----------\n *objs :\n All arguments defined by the parent class\n **kwds : optional\n All keyword arguments defined by the parent class\n \"\"\"\n\n # Define first order forward euler, if not already defined\n if ('forward_euler' not in StateUpdateMethod.stateupdaters):\n eq_forward_euler = '''\n x_new = x + dt * f(x,t)\n '''\n forward_euler = ExplicitStateUpdater(eq_forward_euler, stochastic='none')\n StateUpdateMethod.register('forward_euler', forward_euler)\n\n # Define exact state updater for the pre/post traces for learning, if not already defined\n if ('exact_synapse' not in StateUpdateMethod.stateupdaters):\n eq_exact_synapse = '''\n x_0 = dt*f(x,t)\n x_new = int(x_0)\n '''\n exact_synapse = ExplicitStateUpdater(eq_exact_synapse, stochastic='none')\n StateUpdateMethod.register('exact_synapse', exact_synapse)\n\n # Set default clock dt\n defaultclock.dt = 1*ms\n\n # Call super init\n super().__init__(*objs, **kwds)\n\n # Reorder schedule to match Loihi\n self.schedule = ['start', 'synapses', 'groups', 'thresholds', 'resets', 'end']\n\n def run(self, duration, **kwargs):\n \"\"\" Checks for problems and runs the Brian network simulation\n\n The run method overwrites the run method from the Network class. Just before running\n the simulation, it checks if the most important settings are still valid. If not, a\n warning is shown. The user should be able to choose other settings, but should be warned\n that results can then vary from Loihi. Afterwards the parent run() method is called.\n The duration is modified, such that the LoihiNetwork run() method will only take an integer\n without Brian's time information (e.g. ms). All keyword arguments are passed to the\n parent run() method.\n\n Parameters\n ----------\n duration : int\n Duration of the simulation as an integer value, no time (e.g. ms) should to be added\n **kwargs : optional\n All keyword arguments defined by the parent method\n\n Raises\n ------\n Warning\n If defautlclock dt value has changed and is not set to 1ms any more\n Warning\n If the schedule has changed and is not in the Loihi-like order any more\n \"\"\"\n\n # Check if the user has manually changed defaultclock and print warning\n if (defaultclock.dt != 1*ms):\n warnings.warn(\"The defaultclock.dt is not set to 1*ms, this may cause results which deviate from Loihi.\")\n\n # Check if the user has manually changed schedule and print warning\n if (self.schedule != ['start', 'synapses', 'groups', 'thresholds', 'resets', 'end']):\n warnings.warn(\"The schedule has changed, this may cause results which deviate from Loihi.\")\n\n # Call run method from Brian Network\n super().run(duration*ms, **kwargs)\n"
},
{
"alpha_fraction": 0.6091644167900085,
"alphanum_fraction": 0.613656759262085,
"avg_line_length": 35.49180221557617,
"blob_id": "96d85a0bd969aebf43422a6cf609b8e399918b4b",
"content_id": "3fda9ebec67e1fb29423816d144bcca4b4b7d9b2",
"detected_licenses": [
"MIT"
],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 2226,
"license_type": "permissive",
"max_line_length": 104,
"num_lines": 61,
"path": "/loihi_spike_monitor.py",
"repo_name": "sagacitysite/brian2_loihi",
"src_encoding": "UTF-8",
"text": "from brian2 import SpikeMonitor\n\nclass LoihiSpikeMonitor(SpikeMonitor):\n \"\"\"\n The LoihiSpikeMonitor extends the SpikeMonitor class from Brian2.\n\n This class creates a Brian2 spike monitor and updates the schedule for\n reading the monitored values. This schedule change is performed to\n produce the same outputs as in Loihi.\n\n Methods\n -------\n __init__(source, variable, record=True, order=0)\n Initializes the LoihiSpikeMonitor and the SpikeMonitor\n \"\"\"\n\n @property\n def t(self):\n \"\"\" Property decorator to inclulde a getter for the spike times\n Returns\n -------\n list (int)\n Return spike times as int\n \"\"\"\n return (self.t_*1000).astype(int)\n\n def __init__(self, source, variable=None, record=True, order=None, name='loihi_spikemonitor*'):\n \"\"\" Initializes the LoihiSpikeMonitor and the SpikeMonitor\n\n First, a SpikeMonitor is initialized, based on the given parameters.\n Afterwards, the schedule for monitoring the values is updated.\n Parameters\n ----------\n source : `Group`\n Which object to record values from.\n variable : str, optional\n Which variables to record at the time of the spike (in addition to the index of the neuron).\n Can be the name of a variable or a list of names\n record : bool, sequence of ints, optional\n Which indices to record, nothing is recorded for ``False``,\n everything is recorded for ``True`` (warning: may use a great deal of\n memory), or a specified subset of indices.\n order : int, optional\n The priority of of this group for operations occurring at the same time\n step and in the same scheduling slot. Defaults to 0.\n name : str, optional\n A unique name for the object, otherwise will use\n ``source.name+'_loihi_spikemonitor_0'``, etc.\n \"\"\"\n\n # Define Brian spike monitor\n super().__init__(\n source,\n variable,\n record=record,\n order=order,\n name=name\n )\n\n # Update when states should be monitored\n self.when = 'end'\n"
}
] | 10 |
YumaHashimoto/test_django
|
https://github.com/YumaHashimoto/test_django
|
68331b01b7d6f7ed8febce50e52b63a11fbd6584
|
aff67785b8087fbc92f7077b1d19597d0eaf9918
|
14e0658ff2ee164ce8400d95ec8c3521089acebb
|
refs/heads/master
| 2018-02-08T08:41:06.593994 | 2017-07-11T04:00:05 | 2017-07-11T04:05:07 | 96,523,169 | 0 | 0 | null | null | null | null | null |
[
{
"alpha_fraction": 0.6410398483276367,
"alphanum_fraction": 0.6504424810409546,
"avg_line_length": 41.541175842285156,
"blob_id": "208fb2529a95be1dbfe3f2724b52699383637499",
"content_id": "02ad58a97faf1923f0582c167222a9a8ef93d658",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 3616,
"license_type": "no_license",
"max_line_length": 86,
"num_lines": 85,
"path": "/polls/tests.py",
"repo_name": "YumaHashimoto/test_django",
"src_encoding": "UTF-8",
"text": "# -*- coding: utf-8 -*-\nfrom __future__ import unicode_literals\n\nimport datetime\n\nfrom django.urls import reverse\nfrom django.utils import timezone\nfrom django.test import TestCase\n\nfrom .models import Question\n\n# Create your tests here.\n\nclass QuestionModelTests ( TestCase ):\n\n def test_was_published_recently_with_old_question( self ):\n time = timezone.now() - datetime.timedelta( days=1, seconds=1 )\n old_question = Question( pub_date=time )\n self.assertIs( old_question.was_published_recently(), False )\n\n def test_was_published_recently_with_recent_question( self ):\n time = timezone.now() - datetime.timedelta( hours=23, minutes=59, seconds=59 )\n recent_question = Question( pub_date=time )\n self.assertIs( recent_question.was_published_recently(), True )\n\n def test_was_published_recently_with_future_question( self ):\n time = timezone.now() + datetime.timedelta( days=30 )\n future_question = Question( pub_date=time )\n self.assertIs( future_question.was_published_recently(), False )\n\n\ndef create_question ( question_text, days ):\n time = timezone.now() + datetime.timedelta( days=days )\n return Question.objects.create( question_text=question_text, pub_date=time )\n\nclass QuestionIndexViewTests ( TestCase ):\n def test_no_questions ( self ):\n response = self.client.get( reverse( 'polls:index' ) )\n self.assertEqual( response.status_code, 200 )\n self.assertContains( response, \"No polls are available.\" )\n self.assertQuerysetEqual( response.context[ 'latest_question_list' ], [] )\n\n def test_past_question ( self ):\n create_question( question_text=\"Past question.\", days=-30 )\n response = self.client.get( reverse( 'polls:index' ) )\n self.assertQuerysetEqual(\n response.context[ 'latest_question_list' ],\n [ '<Question: Past question.>' ]\n )\n\n def test_future_question ( self ):\n create_question( question_text=\"Future question.\", days=30 )\n response = self.client.get( reverse( 'polls:index' ) )\n self.assertContains( response, \"No polls are available.\" )\n\n def test_future_question_and_past_question ( self ):\n create_question( question_text=\"Past question.\", days=-30 )\n create_question( question_text=\"Future question.\", days=30 )\n response = self.client.get( reverse( 'polls:index' ) )\n self.assertQuerysetEqual(\n response.context[ 'latest_question_list' ],\n [ '<Question: Past question.>' ]\n )\n\n def test_two_past_questions ( self ):\n create_question( question_text=\"Past question 1.\", days=-30 )\n create_question( question_text=\"Past question 2.\", days=-5 )\n response = self.client.get( reverse( 'polls:index' ) )\n self.assertQuerysetEqual(\n response.context[ 'latest_question_list' ],\n [ '<Question: Past question 2.>', '<Question: Past question 1.>' ]\n )\n\nclass QuestionDetailViewTests ( TestCase ):\n def test_future_question ( self ):\n future_question = create_question( question_text='Future question.', days=5 )\n url = reverse( 'polls:detail', args=( future_question.id, ) )\n response = self.client.get( url )\n self.assertEqual( response.status_code, 404 )\n\n def test_past_question ( self ):\n past_question = create_question( question_text='Past question.', days=-5 )\n url = reverse( 'polls:detail', args=( past_question.id, ) )\n response = self.client.get( url )\n self.assertContains( response, past_question.question_text )\n"
},
{
"alpha_fraction": 0.6686121821403503,
"alphanum_fraction": 0.6763942837715149,
"avg_line_length": 31.125,
"blob_id": "6d523902308723b1f4f6292381e63da3efa06a90",
"content_id": "0c0747c14d3778d17a7299341eb073e94d160397",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 1542,
"license_type": "no_license",
"max_line_length": 88,
"num_lines": 48,
"path": "/polls/views.py",
"repo_name": "YumaHashimoto/test_django",
"src_encoding": "UTF-8",
"text": "# -*- coding: utf-8 -*-\nfrom __future__ import unicode_literals\n\nfrom django.http import Http404, HttpResponse, HttpResponseRedirect\nfrom django.shortcuts import get_object_or_404, render\nfrom django.template import loader\nfrom django.urls import reverse\nfrom django.utils import timezone\nfrom django.views import generic\n\nfrom .models import Question, Choice\n\n# Create your views here.\n\nclass IndexView( generic.ListView ):\n template_name = 'polls/index.html'\n context_object_name = 'latest_question_list'\n\n def get_queryset( self ):\n return Question.objects.filter(\n pub_date__lte=timezone.now()\n ).order_by( '-pub_date' )[ :5 ]\n\nclass DetailView( generic.DetailView ):\n model = Question\n template_name = 'polls/detail.html'\n\n def get_queryset( self ):\n return Question.objects.filter( pub_date__lte=timezone.now() )\n\nclass ResultsView( generic.DetailView ):\n model = Question\n template_name = 'polls/results.html'\n\ndef vote ( request, question_id ):\n question = get_object_or_404( Question, pk=question_id )\n context = {\n 'question': question,\n 'error_message': \"You didn't select a choice.\",\n }\n try:\n selected_choice = question.choice_set.get( pk=request.POST[ 'choice' ] )\n except ( KeyError, Choice.DoesNotExist ):\n return render( request, 'polls/detail.html', context )\n else:\n selected_choice.votes += 1\n selected_choice.save()\n return HttpResponseRedirect( reverse( 'polls:results', args=( question_id, ) ) )\n"
},
{
"alpha_fraction": 0.6747787594795227,
"alphanum_fraction": 0.6880530714988708,
"avg_line_length": 28.161291122436523,
"blob_id": "12dac6a8d10bc81b4b602542567c6c78b0bc1a5d",
"content_id": "dc9c10f616bce748ebf5695bbf36eaeb70944c09",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 904,
"license_type": "no_license",
"max_line_length": 73,
"num_lines": 31,
"path": "/polls/models.py",
"repo_name": "YumaHashimoto/test_django",
"src_encoding": "UTF-8",
"text": "# -*- coding: utf-8 -*-\nfrom __future__ import unicode_literals\n\nimport datetime\n\nfrom django.db import models\nfrom django.utils import timezone\nfrom django.utils.encoding import python_2_unicode_compatible\n\n# Create your models here.\n\n@python_2_unicode_compatible\nclass Question ( models.Model ):\n question_text = models.CharField( max_length=200 )\n pub_date = models.DateTimeField( 'date published' )\n\n def __str__ ( self ):\n return self.question_text\n\n def was_published_recently ( self ):\n now = timezone.now()\n return now - datetime.timedelta( days=1 ) <= self.pub_date <= now\n\n@python_2_unicode_compatible\nclass Choice ( models.Model ):\n question = models.ForeignKey( Question, on_delete=models.CASCADE )\n choice_text = models.CharField( max_length=200 )\n votes = models.IntegerField( default=0 )\n\n def __str__ ( self ):\n return self.choice_text\n"
}
] | 3 |
math-jam/first-api-python
|
https://github.com/math-jam/first-api-python
|
eb65a585e32c6ff91f7a28c126a3e0b1415d0a16
|
3f36954995279703f0d1fd5a7a96ee5f7e1eb0a5
|
68b8f92c64a2eecf15848f03393c429c51104f0e
|
refs/heads/master
| 2023-01-22T07:06:11.994775 | 2020-11-28T16:17:55 | 2020-11-28T16:17:55 | 316,772,980 | 0 | 0 | null | null | null | null | null |
[
{
"alpha_fraction": 0.6262136101722717,
"alphanum_fraction": 0.6650485396385193,
"avg_line_length": 17.81818199157715,
"blob_id": "c15788efcfaf573b97e696cd7b8e85248dc685c3",
"content_id": "a08e6547725d95b1881b5c4dc9c35c7d8118aea0",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 206,
"license_type": "no_license",
"max_line_length": 47,
"num_lines": 11,
"path": "/index.py",
"repo_name": "math-jam/first-api-python",
"src_encoding": "UTF-8",
"text": "import os\nimport requests\nfrom flask import Flask, request, jsonify, json\n\napp = Flask(__name__)\n\[email protected](\"/primeira/<nome>\")\ndef ok(nome): \n return \"Nome: \"+nome\n\napp.run(host=\"0.0.0.0\", port = 2000)"
}
] | 1 |
couchbaselabs/try-cb-python
|
https://github.com/couchbaselabs/try-cb-python
|
475c7f4b9fdafe71df4af02661cc6adfee848093
|
f51481dba9e1a4acffbd58bcd638202981e9b9dc
|
c34278c7965bdf5428756a7386da626e2a1ecdbe
|
refs/heads/7.1
| 2023-08-22T14:50:57.414203 | 2023-08-10T09:33:11 | 2023-08-10T09:33:11 | 78,461,833 | 22 | 30 |
NOASSERTION
| 2017-01-09T19:35:45 | 2022-08-03T06:10:10 | 2023-08-10T09:35:48 |
Python
|
[
{
"alpha_fraction": 0.7518856525421143,
"alphanum_fraction": 0.7575426697731018,
"avg_line_length": 43.78666687011719,
"blob_id": "8f30ef7ff34754e31a6e91419bdea0894246fdec",
"content_id": "5eb9482751efe3cd971f55a6c8a9d6737fb2f5ea",
"detected_licenses": [
"MIT"
],
"is_generated": false,
"is_vendor": false,
"language": "Markdown",
"length_bytes": 10076,
"license_type": "permissive",
"max_line_length": 295,
"num_lines": 225,
"path": "/README.md",
"repo_name": "couchbaselabs/try-cb-python",
"src_encoding": "UTF-8",
"text": "# Couchbase Python travel-sample Application REST Backend\n\nThis is a sample application for getting started with [Couchbase Server] and the Python SDK.\nThe application runs a single page web UI for demonstrating SQL++ for Documents, Sub-document requests and Full-Text Search (FTS) querying capabilities.\nIt uses Couchbase Server together with the [Flask] web framework for [Python], [Swagger] for API documentation, [Vue] and [Bootstrap].\n\nThe application is a flight planner that allows the user to search for and buy flight routes based on airports and dates.\nAirport selection happens dynamically using an autocomplete box bound to SQL++ queries on the server side. After selecting a date, it searches\nfor applicable air flight routes from a populated database. An additional page allows users to search for Hotels using less structured keywords.\n\n\n\n## Prerequisites\n\nTo download the application you can either download [the archive](https://github.com/couchbaselabs/try-cb-python/archive/master.zip) or clone the repository:\n\n git clone https://github.com/couchbaselabs/try-cb-python.git\n\nYou can run the application with Docker, which starts all components for you.\nYou can also run it in a Mix-and-Match style, described [here](#mix-and-match-services).\n\n\n## Running the Application with Docker\n\nYou need [Docker](https://docs.docker.com/get-docker/) installed on your machine to run this application. \nA provided [_Dockerfile_](Dockerfile) and a [_docker-compose.yml_](docker-compose.yml) run Couchbase Server 7.x, the front-end [Vue application](https://github.com/couchbaselabs/try-cb-frontend-v2.git) and the Python REST API.\n\nTo launch the full application, run this command from a terminal:\n\n docker compose --profile local up\n\n> **_NOTE:_** You may need more than the default RAM to run the images.\nCouchbase have tested the travel-sample apps with 4.5 GB RAM configured in Docker's Preferences -> Resources -> Memory.\nWhen you run the application for the first time, it pulls/builds the relevant Docker images, so it may take some time.\n\nThis starts the backend, Couchbase Server 7.x and the Vue frontend app.\n\nYou can find the backend API at http://localhost:8080/, the UI at\nhttp://localhost:8081/ and Couchbase Server at http://localhost:8091/\n\nYou should then be able to browse the UI, search for airports and get flight route information.\n\nTo end the application press <kbd>Control</kbd>+<kbd>C</kbd> in the terminal\nand wait for `docker-compose` to stop your containers.\n\nRunning the application with the `local` profile pulls an image containing a prebuilt version of the backend. If you want to make changes to the backend, you can run the application with the `local-server` profile, detailed [here](#editing-the-backend). \n\n## Run the Database in Capella\n\nTo run the database in Couchbase Capella, the invocation is as straight-forward, but there are more setup steps:\n\n### Create the Capella Cluster\n\nFirst, [sign up for a Capella account](https://docs.couchbase.com/cloud/get-started/get-started.html) and deploy a cluster.\n\nThe travel application uses the `travel-sample` data bucket, which the cluster imports by default. To verify this, go to **Data Tools > Buckets**. You should see the `travel-sample` bucket with around 63k items.\n\nIf the bucket isn't present, you can import it manually. See [Import](https://docs.couchbase.com/cloud/clusters/data-service/import-data-documents.html) for information about how to import the `travel-sample` bucket.\n\n### Create the Search Index\n\n1. Go to **Data Tools > Search > Create Search Index**\n2. Click **Import from File**\n3. Navigate to the try-cb-python directory, and select `fts-hotels-index.json`\n4. Click **Create Index**\n\nIf you can't use the filesystem with the backend, you can copy + paste the index definition from [this repository](https://raw.githubusercontent.com/couchbaselabs/try-cb-python/HEAD/fts-hotels-index.json) into the **Index Definition** field.\n\n### Create the Database Access\n\nCreate the credentials to log in **Settings > Database Access > Create Database Access**\n\n* Access Name: cbdemo\n* Secret: Password123!\n* Bucket: travel-sample\n* Scopes: All Scopes\n* Access: read/write\n\nClick **Create Database Access** to save your access credentials.\n\n### Allow Your IP\n\nGo to **Settings > Allowed IP Ranges > Add Allowed IP**.\n\nEnter the IP of the system you will be running the application on in the *Allowed IP* field. If this system is the same one you are accessing the Capella UI on, you can click **Add Current IP Address**.\n\nClick **Add Allowed IP** to add the IP address.\n\n### Copy the Connection String\n\nFrom the **Connect** tab, copy your cluster's connection string, which looks something like:\n\n```\ncouchbases://cb.dmt-i0huhchusg9g.cloud.couchbase.com\n```\n\n### Start the Backend and Frontend\n\nRun the following command to start the application.\n\n```\nCB_HOST={your-connection-string} docker-compose --profile capella up\n```\n\nYou only need to set the `CB_HOST` variable to point the backend to your database.\nIf you chose a different username and password than the demo ones, then you also need to set these.\n\n```\n$ CB_HOST={your-connection-string}\n$ CB_USER={your-username}\n$ CB_PSWD={your-password}\n\ndocker compose --profile capella up\n```\n## Mix and Match Services\n\nInstead of running all services, you can start any combination of `backend`,\n`frontend`, `db` via Docker, and take responsibility for starting the other\nservices yourself.\n\nAs the provided `docker-compose.yml` establishes dependencies between the services,\nto make startup as smooth and automatic as possible, there is also an\nalternative `mix-and-match.yml`.\n\n\n### Bring Your Own Database\n\nTo run this application against your own configuration of Couchbase\nServer, you need version 7.0.0 or later with the `travel-sample`\nbucket setup.\n\n> **_NOTE:_** If you aren't using Docker to start Couchbase Server, or you aren't using the\n> provided wrapper `wait-for-couchbase.sh`, you need to create a full-text\n> Search index on travel-sample bucket called 'hotels-index'. You can do this\n> with the following command:\n\n curl --fail -s -u <username>:<password> -X PUT \\\n http://<host>:8094/api/index/hotels-index \\\n -H 'cache-control: no-cache' \\\n -H 'content-type: application/json' \\\n -d @fts-hotels-index.json\n\nWith a running Couchbase Server, you can pass the database details in:\n\n CB_HOST=10.144.211.101 CB_USER=Administrator CB_PSWD=password docker compose -f mix-and-match.yml up backend frontend\n\nThe Docker image runs the same checks as usual, and also creates the\n`hotels-index` if it doesn't already exist.\n\n\n### Running the Python API Application Manually\n\nYou may want to run the backend one your local machine.\nYou can still use Docker to run the Database and Frontend components if desired.\n\n> **_NOTE:_** See above for specific details on running your database in Couchbase Capella.\n\n1. Make sure you have `Python 3.7` or later installed on your machine.\n2. Install the project dependencies by running:\n `pip install -r requirements.txt`\n3. Start the database:\n `docker compose -f mix-and-match.yml up -d db`\n `export CB_HOST=localhost CB_USER=Administrator CB_PSWD=password`\n `./wait-for-couchbase.sh echo Couchbase is ready!`\n The `wait-for-couchbase` wrapper waits until the database has started, and loaded the sample data and indexes.\n If the database is already running, you can skip this step\n4. Start the backend:\n `python3 travel.py -c $CB_HOST -u $CB_USER -p $CB_PSWD`\n5. Start the frontend:\n `docker-compose -f mix-and-match.yml up frontend`\n\n### Running the Frontend Manually\n\nTo run the frontend components manually without Docker, follow the guide\n[here](https://github.com/couchbaselabs/try-cb-frontend-v2)\n\n## Editing the Backend\n\nYou may want to make changes to the backend, without running it manually. Couchbase have provided a profile in the `docker-compose.yml` to run the backend mounted on the code in this directory. This allows you to make changes to the backend code, and see it instantly reflected in the container.\n\nTo start the application in this mode, run the command:\n\n docker compose --profile local-server up\n\nIf your database is running in Capella, run this command instead:\n\n $ CB_HOST={your-connection-string}\n $ CB_USER={your-username}\n $ CB_PSWD={your-password}\n\n docker compose --profile local-capella up\n\nYou still need to complete all the [setup steps](#run-the-database-in-capella).\n\n> **_NOTE:_** As this mode doesn't use a prebuilt image, you may encounter dependency issues when building the backend image. \n\n\n## REST API Reference and Tests\n\nAll the travel-sample apps conform to the same interface, which means that the same database configuration and Vue.js frontend can use any backend.\n\nYou can find the Swagger/OpenApi version 3 documentation on the backend at `http://localhost:8080/apidocs` once you've started the application.\n\nYou can also view a read-only version at https://docs.couchbase.com/python-sdk/current/hello-world/sample-application.html#\n\nTo further verify that every application conforms to the API, there is a [test suite][try-cb-test], which you can run with the command:\n\n```\ndocker-compose --profile test up test\n```\n\nIf you are running locally, and want to extend or modify the travel-sample application, you can make changes to both the code and the tests in parallel:\n\n * Start the backend server locally, for example using \"Running the Python API Application Manually\" above.\n * Check out the [test suite][try-cb-test] repository in a separate working directory, and run the tests manually, as per the instructions.\n\n\n[Couchbase Server]: https://www.couchbase.com/\n[Python SDK]: https://docs.couchbase.com/python-sdk/current/hello-world/overview.html\n[Flask]: https://flask.palletsprojects.com/\n[Python]: https://www.python.org/\n[Swagger]: https://swagger.io/resources/open-api/\n[Vue]: https://vuejs.org/\n[Bootstrap]: https://getbootstrap.com/\n[try-cb-test]: https://github.com/couchbaselabs/try-cb-test/"
},
{
"alpha_fraction": 0.7155361175537109,
"alphanum_fraction": 0.7286652326583862,
"avg_line_length": 20.809524536132812,
"blob_id": "c38d8d5fda2d7a984fe710b4f131862e2844350d",
"content_id": "7b4f7876faba1548d287e0d541f8081b0dd65b96",
"detected_licenses": [
"MIT"
],
"is_generated": false,
"is_vendor": false,
"language": "Dockerfile",
"length_bytes": 457,
"license_type": "permissive",
"max_line_length": 100,
"num_lines": 21,
"path": "/Dockerfile",
"repo_name": "couchbaselabs/try-cb-python",
"src_encoding": "UTF-8",
"text": "FROM python:3.9-slim-bullseye\n\nLABEL maintainer=\"Couchbase\"\n\nWORKDIR /app\n# Requirements need to be copied over manually as volumes are not created until the container is run\nCOPY requirements.txt /app\n\nRUN apt-get update && apt-get install -y \\\n build-essential \\\n cmake \\\n libssl-dev \\\n jq curl\n\nRUN pip install -r requirements.txt\n\n# Expose ports\nEXPOSE 8080\n\n# Set the entrypoint\nENTRYPOINT [\"./wait-for-couchbase.sh\", \"python\", \"travel.py\"]"
},
{
"alpha_fraction": 0.5111228227615356,
"alphanum_fraction": 0.5193642377853394,
"avg_line_length": 36.099998474121094,
"blob_id": "1572daa7419023bae8aef1de03e6d4cfd2b23587",
"content_id": "d26aab96f790c03f1dcd066a06a11a3955ecb0cf",
"detected_licenses": [
"MIT"
],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 32276,
"license_type": "permissive",
"max_line_length": 139,
"num_lines": 870,
"path": "/travel.py",
"repo_name": "couchbaselabs/try-cb-python",
"src_encoding": "UTF-8",
"text": "import argparse\nimport math\nimport uuid\nimport jwt # from PyJWT\nfrom datetime import datetime\nfrom random import random\nfrom flasgger import Swagger, SwaggerView\nfrom flask import Flask, jsonify, make_response, request\nfrom flask.blueprints import Blueprint\nfrom flask_classy import FlaskView\nfrom flask_cors import CORS, cross_origin\n\n# Couchbase Imports\nimport couchbase.search as FT\nimport couchbase.subdocument as SD\nfrom couchbase.cluster import Cluster\nfrom couchbase.options import ClusterOptions, SearchOptions\nfrom couchbase.auth import PasswordAuthenticator\nfrom couchbase.exceptions import *\n\n# From Couchbase Server 5.0 onward, there must be a username and password.\n# User must have full access to read/write bucket/data and read access for\n# Query and Search.\n# The default username and password are set in `wait-for-couchbase.sh`\n# -----------LOCAL-----------\n# CONNSTR = 'couchbase://db'\n# USERNAME = 'Administrator'\n# PASSWORD = 'password'\n# ----------CAPELLA----------\n# CONNSTR = 'couchbases://db'\n# USERNAME = 'cbdemo'\n# PASSWORD = 'Password123!'\n# ---------------------------\n\n# Editing this file? Replicate your changes in the 'sample-app.py' file in\n# the 'docs-sdk-python' repo to have these changes appear in the tutorial.\n\nJWT_SECRET = 'cbtravelsample'\n\nparser = argparse.ArgumentParser()\nparser.add_argument('-c', '--cluster', help='Connection String i.e. localhost', default='db')\nparser.add_argument('-s', '--scheme', help='couchbase or couchbases', default='couchbase')\nparser.add_argument('-a', '--connectargs', help=\"?any_additional_args\", default=\"\")\nparser.add_argument('-u', '--user', help='User with access to bucket')\nparser.add_argument('-p', '--password', help='Password of user with access to bucket')\n\nargs = parser.parse_args()\n\n# Init CB connection parameters\n\nif not args.cluster:\n raise ConnectionError(\"No value for CB_HOST set!\")\nif not args.user:\n raise ConnectionError(\"No value for CB_USER set!\")\nif not args.password:\n raise ConnectionError(\"No value for CB_PSWD set!\")\n\nif (\"couchbases://\" in args.cluster) or (\"couchbase://\" in args.cluster):\n\tCONNSTR = f\"{args.cluster}{args.connectargs}\"\nelse:\n\tCONNSTR = f\"{args.scheme}://{args.cluster}{args.connectargs}\"\n \nauthenticator = PasswordAuthenticator(args.user, args.password)\nprint(\"Connecting to: \" + CONNSTR)\n\n# Initialise the web app\napp = Flask(__name__)\napp.config.from_object(__name__)\napp.config['SWAGGER'] = {\n 'openapi': '3.0.3',\n 'title': 'Travel Sample API',\n 'version': '1.0',\n 'description': 'A sample API for getting started with Couchbase Server and the SDK.',\n 'termsOfService': ''\n}\n\nswagger_template = {\n \"components\": {\n \"securitySchemes\": {\n \"bearer\": {\n \"type\": \"http\",\n \"scheme\": \"bearer\",\n \"bearerFormat\": \"JWT\",\n \"description\": \"JWT Authorization header using the Bearer scheme.\"\n }\n },\n \"schemas\": {\n \"Error\": {\n \"type\": \"object\",\n \"properties\": {\n \"message\": {\n \"type\": \"string\",\n \"example\": \"An error message\"\n }\n }\n },\n \"Context\": {\n \"type\": \"array\",\n \"items\": {\"type\": \"string\"}\n },\n \"ResultList\": {\n \"type\": \"object\",\n \"properties\": {\n \"context\": {\"$ref\": \"#/components/schemas/Context\"},\n \"data\": {\n \"type\": \"array\",\n \"items\": {\"type\": \"object\"}\n }\n }\n },\n \"ResultSingleton\": {\n \"type\": \"object\",\n \"properties\": {\n \"context\": {\"$ref\": \"#/components/schemas/Context\"},\n \"data\": {\n \"type\": \"object\",\n }\n }\n }\n }\n }\n}\n\n\napi = Blueprint(\"api\", __name__)\n\nCORS(app, headers=['Content-Type', 'Authorization'])\n\n# The default API endpoint\[email protected]('/')\ndef index():\n \"\"\"Returns the index page\n ---\n responses:\n 200:\n description: Returns the API index page\n content:\n text/html:\n example: <h1> Travel Sample API </h1>\n \"\"\"\n\n return \"\"\"\n <h1> Python Travel Sample API </h1>\n A sample API for getting started with Couchbase Server and the Python SDK.\n <ul>\n <li> <a href = \"/apidocs\"> Learn the API with Swagger, interactively </a>\n <li> <a href = \"https://github.com/couchbaselabs/try-cb-python\"> GitHub </a>\n </ul>\n \"\"\"\n\n\ndef lowercase(key):\n return key.lower()\n\nclass AirportView(SwaggerView):\n \"\"\"Airport class for airport objects in the database\"\"\"\n\n @api.route('/airports', methods=['GET', 'OPTIONS'])\n @cross_origin(supports_credentials=True)\n def airports():\n \"\"\"Returns list of matching airports and the source query\n ---\n tags:\n - airports\n parameters:\n - name: search\n in: query\n required: true\n schema:\n type: string\n example: SFO\n description: The airport name/code to search for\n responses:\n 200:\n description: Returns airport data and query context information\n content:\n application/json:\n schema:\n $ref: '#/components/schemas/ResultList' \n example:\n context: [\"A description of a SQL++ operation\"]\n data: [{\"airportname\": \"San Francisco Intl\"}]\n \"\"\"\n\n queryType = \"SQL++ query - scoped to inventory: \"\n partialAirportName = request.args['search']\n\n queryPrep = \"SELECT airportname FROM `travel-sample`.inventory.airport WHERE \"\n sameCase = partialAirportName == partialAirportName.lower() or partialAirportName == partialAirportName.upper() #bool\n\n # The code does some guesswork to determine what the user is typing in.\n # This is based on string length and capitalization. If it believes the\n # string is an FAA or ICAO code, it queries for a match in the 'faa' or\n # 'icao' field. Otherwise, the code assumes a partial airport name, and\n # queries for a substring match at the start of the 'airportname' field\n\n if sameCase and len(partialAirportName) == 3:\n queryPrep += \"faa=$1\"\n queryArgs = [partialAirportName.upper()]\n elif sameCase and len(partialAirportName) == 4:\n queryPrep += \"icao=$1\"\n queryArgs = [partialAirportName.upper()]\n else:\n queryPrep += \"POSITION(LOWER(airportname), $1) = 0\"\n queryArgs = [partialAirportName.lower()]\n\n results = cluster.query(queryPrep, *queryArgs)\n airports = [x for x in results]\n\n # 'context' is returned to the frontend to be shown in the Query Log\n\n context = [queryType + queryPrep]\n\n response = make_response(jsonify({\"data\": airports, \"context\": context}))\n return response\n\n\nclass FlightPathsView(SwaggerView):\n \"\"\" FlightPath class for computed flights between two airports FAA codes\"\"\"\n\n @api.route('/flightPaths/<fromLoc>/<toLoc>', methods=['GET', 'OPTIONS'])\n @cross_origin(supports_credentials=True)\n def flightPaths(fromLoc, toLoc):\n \"\"\"\n Return flights information, cost and more for a given flight time and date\n ---\n tags:\n - flightPaths\n parameters:\n - name: fromLoc\n in: path\n required: true\n schema:\n type: string\n example: San Francisco Intl\n description: Airport name for beginning route\n - name: toLoc\n in: path\n required: true\n schema:\n type: string\n example: Los Angeles Intl\n description: Airport name for end route\n - name: leave\n in: query\n required: true\n schema:\n type: string\n format: date\n example: \"05/24/2021\"\n description: Date of flight departure in `mm/dd/yyyy` format\n responses:\n 200:\n description: Returns flight data and query context information\n content:\n application/json:\n schema:\n $ref: '#/components/schemas/ResultList'\n example:\n context: [\"SQL++ query - scoped to inventory: SELECT faa as fromAirport FROM `travel-sample`.inventory.airport\n WHERE airportname = $1 UNION SELECT faa as toAirport FROM `travel-sample`.inventory.airport WHERE airportname = $2\"]\n data: [{\n \"destinationairport\": \"LAX\",\n \"equipment\": \"738\",\n \"flight\": \"AA331\",\n \"flighttime\": 1220,\n \"name\": \"American Airlines\",\n \"price\": 152.5,\n \"sourceairport\": \"SFO\",\n \"utc\": \"16:37:00\"\n }]\n \"\"\"\n\n # 'context' is returned to the frontend to be shown in the Query Log\n\n queryType = \"SQL++ query - scoped to inventory: \"\n context = []\n\n faaQueryPrep = \"SELECT faa as fromAirport FROM `travel-sample`.inventory.airport \\\n WHERE airportname = $1 \\\n UNION SELECT faa as toAirport FROM `travel-sample`.inventory.airport \\\n WHERE airportname = $2\"\n \n faaResults = cluster.query(faaQueryPrep, fromLoc, toLoc)\n\n # The query results are an iterable object consisting of dicts with the\n # fields from each doc. The results from the query will be formatted as\n # [{'fromAirport':<faa code>}, {'toAirport':<faa code>}]\n # Note, results are unordered, so the ordering above may be inaccurate.\n # The iterable therefore needs to be flattened so the correct field can\n # be extracted.\n \n flightPathDict = {}\n for result in faaResults:\n flightPathDict.update(result)\n\n # flightPathDict will be formatted as\n # {'fromAirport':<faa code>, 'toAirport':<faa code>}\n\n queryFrom = flightPathDict['fromAirport']\n queryTo = flightPathDict['toAirport']\n\n context.append(queryType + faaQueryPrep)\n\n routeQueryPrep = \"SELECT a.name, s.flight, s.utc, r.sourceairport, r.destinationairport, r.equipment \\\n FROM `travel-sample`.inventory.route AS r \\\n UNNEST r.schedule AS s \\\n JOIN `travel-sample`.inventory.airline AS a ON KEYS r.airlineid \\\n WHERE r.sourceairport = $fromfaa AND r.destinationairport = $tofaa AND s.day = $dayofweek \\\n ORDER BY a.name ASC;\"\n\n # The date provided by the frontend needs to be converted into a number\n # between 0 and 6 (representing the days of the week) in order to match\n # the format in the database.\n\n flightDay = convdate(request.args['leave'])\n routeResults = cluster.query(routeQueryPrep, \n fromfaa=queryFrom, \n tofaa=queryTo, \n dayofweek=flightDay)\n\n # The 'QueryResult' object can only be iterated over once - any further\n # attempts to do so will result in an 'AlreadyQueried' exception. It is\n # good practice to move the results into another data structure such as\n # a list.\n # Price data is not a part of the sample dataset, so a random number is\n # picked and added to the result dict.\n\n routesList = []\n for route in routeResults:\n route['price'] = math.ceil(random() * 500) + 250\n routesList.append(route)\n\n # Include the second routes query in the context\n context.append(queryType + routeQueryPrep)\n\n response = make_response(jsonify({\"data\": routesList, \"context\": context}))\n return response\n\n\nclass TenantUserView(SwaggerView):\n \"\"\"Class for storing user related information for a given tenant\"\"\"\n\n @api.route('/tenants/<tenant>/user/login', methods=['POST', 'OPTIONS'])\n @cross_origin(supports_credentials=True)\n def login(tenant):\n \"\"\"Login an existing user for a given tenant agent\n ---\n tags:\n - tenants\n parameters:\n - name: tenant\n in: path\n required: true\n schema:\n type: string\n example: tenant_agent_00\n description: Tenant agent name\n requestBody:\n content:\n application/json:\n schema:\n type: object\n required:\n - user\n - password\n properties:\n user:\n type: string\n example: \"user1\"\n password:\n type: string\n example: \"password1\"\n responses:\n 200:\n description: Returns login data and query context information\n content:\n application/json:\n schema:\n $ref: '#/components/schemas/ResultSingleton' \n example:\n context: [\"KV get - scoped to tenant_agent_00.users: for password field in document user1\"]\n data: \n token: eyJ0eXAiOiJKV1QiLCJhbGciOiJIUzI1NiJ9.eyJ1c2VyIjoibXNfdXNlciJ9.GPs8two_vPVBpdqD7cz_yJ4X6J9yDTi6g7r9eWyAwEM\n 401:\n description: Returns an authentication error\n content:\n application/json:\n schema: \n $ref: '#/components/schemas/Error'\n \"\"\" \n\n requestBody = request.get_json()\n user = requestBody['user']\n providedPassword = requestBody['password']\n\n userDocumentKey = lowercase(user)\n\n agent = lowercase(tenant)\n scope = bucket.scope(agent)\n users = scope.collection('users')\n\n queryType = f\"KV get - scoped to {scope.name}.users: for password field in document \"\n\n # Perform a sub-document GET request for the 'password' field on a\n # document with the provided username as the key.\n try:\n documentPassword = users.lookup_in(userDocumentKey, (\n SD.get('password'),\n )).content_as[str](0)\n\n if documentPassword != providedPassword:\n return abortmsg(401, \"Password does not match\")\n\n except DocumentNotFoundException:\n print(f\"User {user} item does not exist\", flush=True)\n except AmbiguousTimeoutException or UnAmbiguousTimeoutException:\n print(\"Request timed out - has Couchbase stopped running?\", flush=True)\n else:\n return jsonify({'data': {'token': genToken(user)}, 'context': [queryType + user]})\n\n return abortmsg(401, \"Failed to get user data\")\n\n @api.route('/tenants/<tenant>/user/signup', methods=['POST', 'OPTIONS'])\n @cross_origin(supports_credentials=True)\n def signup(tenant):\n \"\"\"Signup a new user\n ---\n tags:\n - tenants\n parameters:\n - name: tenant\n in: path\n required: true\n schema:\n type: string\n example: tenant_agent_00\n description: Tenant agent name\n requestBody:\n content:\n application/json:\n schema:\n type: object\n required:\n - user\n - password\n properties:\n user:\n type: string\n example: \"user1\"\n password:\n type: string\n example: \"password1\"\n responses:\n 201:\n description: Returns login data and query context information\n content:\n application/json:\n schema:\n $ref: '#/components/schemas/ResultSingleton' \n example:\n context: [\"KV insert - scoped to tenant_agent_00.users: document user1\"]\n data:\n token: eyJ0eXAiOiJKV1QiLCJhbGciOiJIUzI1NiJ9.eyJ1c2VyIjoibXNfdXNlciJ9.GPs8two_vPVBpdqD7cz_yJ4X6J9yDTi6g7r9eWyAwEM\n 409:\n description: Returns a conflict error\n content:\n application/json:\n schema: \n $ref: '#/components/schemas/Error'\n \"\"\"\n \n requestBody = request.get_json()\n user = requestBody['user']\n password = requestBody['password']\n\n userDocumentKey = lowercase(user)\n\n agent = lowercase(tenant)\n scope = bucket.scope(agent)\n users = scope.collection('users')\n\n queryType = f\"KV insert - scoped to {scope.name}.users: document \"\n\n try:\n users.insert(userDocumentKey, {'username': user, 'password': password})\n responseJSON = jsonify(\n {'data': {'token': genToken(user)}, 'context': [queryType + user]})\n response = make_response(responseJSON)\n return response, 201\n\n except DocumentExistsException:\n print(f\"User {user} item already exists\", flush=True)\n return abortmsg(409, \"User already exists\")\n except Exception as e:\n print(e)\n return abortmsg(500, \"Failed to save user\", flush=True)\n\n @api.route('/tenants/<tenant>/user/<username>/flights', methods=['GET', 'OPTIONS'])\n @cross_origin(supports_credentials=True)\n def getflights(tenant, username):\n \"\"\"List the flights that have been reserved by a user\n ---\n tags:\n - tenants\n parameters:\n - name: tenant\n in: path\n required: true\n schema:\n type: string\n example: tenant_agent_00\n description: Tenant agent name\n - name: username\n in: path\n required: true\n schema:\n type: string\n example: user1\n description: Username\n responses:\n 200:\n description: Returns flight data and query context information\n content:\n application/json:\n schema:\n $ref: '#/components/schemas/ResultList'\n example: \n context: [\"KV get - scoped to tenant_agent_00.users: for 2 bookings in document user1\"]\n data: [\n {\n \"date\": \"05/24/2021\",\n \"destinationairport\": \"LAX\",\n \"equipment\": \"738\",\n \"flight\": \"AA655\",\n \"flighttime\": 5383,\n \"name\": \"American Airlines\",\n \"price\": 672.88,\n \"sourceairport\": \"SFO\",\n \"utc\": \"11:42:00\"\n },\n {\n \"date\": \"05/28/2021\",\n \"destinationairport\": \"SFO\",\n \"equipment\": \"738\",\n \"flight\": \"AA344\",\n \"flighttime\": 6081,\n \"name\": \"American Airlines\",\n \"price\": 760.13,\n \"sourceairport\": \"LAX\",\n \"utc\": \"20:47:00\"\n }\n ]\n 401:\n description: Returns an authentication error\n content:\n application/json:\n schema: \n $ref: '#/components/schemas/Error'\n security:\n - bearer: []\n \"\"\"\n agent = lowercase(tenant)\n\n scope = bucket.scope(agent)\n users = scope.collection('users')\n flights = scope.collection('bookings')\n\n # HTTP token authentication\n bearer = request.headers['Authorization']\n if not auth(bearer, username):\n return abortmsg(401, 'Username does not match token username: ' + username)\n \n try:\n userDocumentKey = lowercase(username)\n \n # The lookup does both a 'get' and an 'exists' in the same op. This\n # avoids having to handle a 'PathNotFoundException'.\n\n lookupResult = users.lookup_in(\n userDocumentKey,\n [\n SD.get('bookings'),\n SD.exists('bookings')\n ])\n \n bookedFlightKeys = []\n if lookupResult.exists(1):\n bookedFlightKeys = lookupResult.content_as[list](0)\n\n # GET requests are now performed to get the content of the bookings\n\n rows = []\n for key in bookedFlightKeys:\n rows.append(flights.get(key).content_as[dict])\n\n queryType = f\"KV get - scoped to {scope.name}.users: for {len(bookedFlightKeys)} bookings in document \"\n response = make_response(jsonify({\"data\": rows, \"context\": [queryType + userDocumentKey]}))\n return response\n \n except DocumentNotFoundException:\n return abortmsg(401, \"User does not exist\")\n\n @api.route('/tenants/<tenant>/user/<username>/flights', methods=['PUT', 'OPTIONS'])\n @cross_origin(supports_credentials=True)\n def updateflights(tenant, username):\n \"\"\"Book a new flight for a user\n ---\n tags:\n - tenants\n parameters:\n - name: tenant\n in: path\n required: true\n schema:\n type: string\n example: tenant_agent_00\n description: Tenant agent name\n - name: username\n in: path\n required: true\n schema:\n type: string\n example: user1\n description: Username\n requestBody:\n content:\n application/json:\n schema:\n type: object\n properties:\n flights:\n type: array\n format: string\n example: [{\n \"name\": \"boeing\",\n \"flight\": \"12RF\",\n \"price\": 50.0,\n \"date\": \"12/12/2020\",\n \"sourceairport\": \"London (Gatwick)\",\n \"destinationairport\": \"Leonardo Da Vinci International Airport\"\n }]\n responses:\n 200:\n description: Returns flight data and query context information\n content:\n application/json:\n schema:\n $ref: '#/components/schemas/ResultSingleton'\n example:\n context: [\"KV update - scoped to tenant_agent_00.users: for bookings field in document user1\"]\n data:\n added: [{\n \"date\": \"12/12/2020\",\n \"destinationairport\": \"Leonardo Da Vinci International Airport\",\n \"flight\": \"12RF\",\n \"name\": \"boeing\",\n \"price\": 50.0,\n \"sourceairport\": \"London (Gatwick)\"\n }]\n 401:\n description: Returns an authentication error\n content:\n application/json:\n schema: \n $ref: '#/components/schemas/Error'\n security:\n - bearer: []\n \"\"\"\n agent = lowercase(tenant)\n user = lowercase(username)\n\n scope = bucket.scope(agent)\n users = scope.collection('users')\n bookings = scope.collection('bookings')\n\n queryType = f\"KV update - scoped to {scope.name}.users: for bookings field in document \"\n\n # HTTP token authentication\n bearer = request.headers['Authorization']\n if not auth(bearer, username):\n return abortmsg(401, 'Username does not match token username: ' + username)\n \n # Add the flight details to a new document in the bookings collection.\n\n try:\n flightData = request.get_json()['flights'][0]\n flightID = str(uuid.uuid4())\n bookings.upsert(flightID, flightData)\n\n except Exception as e:\n print(e, flush=True)\n return abortmsg(500, \"Failed to add flight data\")\n \n # The booking is document not associated with a user. A Sub-Document op\n # is performed to add the key of the booking document to the 'bookings'\n # field in the given user's document.\n \n try:\n users.mutate_in(user, (SD.array_append('bookings', flightID, create_parents=True),))\n resultJSON = {'data': {'added': [flightData]},\n 'context': [queryType + user]}\n return make_response(jsonify(resultJSON))\n \n except DocumentNotFoundException:\n return abortmsg(401, \"User does not exist\")\n except Exception:\n return abortmsg(500, \"Couldn't update flights\")\n\n\nclass HotelView(SwaggerView):\n \"\"\"Class for storing Hotel search related information\"\"\"\n\n @api.route('/hotels/<description>/<location>/', methods=['GET'])\n @cross_origin(supports_credentials=True)\n def hotels(description, location):\n # Requires FTS index called 'hotels-index'\n # TODO auto create index if missing\n \"\"\"Find hotels using full text search\n ---\n tags:\n - hotels\n parameters:\n - name: description \n in: path\n required: false\n schema:\n type: string\n example: pool\n description: Hotel description keywords\n - name: location\n in: path\n required: false\n schema:\n type: string\n example: San Francisco\n description: Hotel location \n responses:\n 200:\n description: Returns hotel data and query context information\n content:\n application/json:\n schema:\n $ref: '#/components/schemas/ResultList'\n example:\n context: [\"FTS search - scoped to: inventory.hotel within fields address,city,state,country,name,description\"]\n data: [\n {\n \"address\": \"250 Beach St, San Francisco, California, United States\",\n \"description\": \"Nice hotel, centrally located (only two blocks from Pier 39). Heated outdoor swimming pool.\",\n \"name\": \"Radisson Hotel Fisherman's Wharf\"\n },\n {\n \"address\": \"121 7th St, San Francisco, California, United States\",\n \"description\": \"Chain motel with a few more amenities than the typical Best Western; outdoor swimming pool,\n internet access, cafe on-site, pet friendly.\",\n \"name\": \"Best Western Americania\"\n }\n ]\n \"\"\" \n queryPrep = FT.ConjunctionQuery()\n if location != '*' and location != \"\":\n queryPrep.conjuncts.append(\n FT.DisjunctionQuery(\n FT.MatchPhraseQuery(location, field='country'),\n FT.MatchPhraseQuery(location, field='city'),\n FT.MatchPhraseQuery(location, field='state'),\n FT.MatchPhraseQuery(location, field='address')\n ))\n\n if description != '*' and description != \"\":\n queryPrep.conjuncts.append(\n FT.DisjunctionQuery(\n FT.MatchPhraseQuery(description, field='description'),\n FT.MatchPhraseQuery(description, field='name')\n ))\n \n # Attempting to run a compound query with no sub-queries will result in\n # a 'NoChildrenException'.\n\n if len(queryPrep.conjuncts) == 0:\n queryType = \"FTS search rejected - no search terms were provided\"\n response = {'data': [], 'context': [queryType]}\n return jsonify(response)\n \n searchRows = cluster.search_query('hotels-index', \n queryPrep, \n SearchOptions(limit=100))\n\n # The 'SearchResult' object returned by the search does not contain the\n # full document, consisting of just matches and metadata. This metadata\n # includes the document key, so sub-document operations retrieve all of\n # the fields needed by the frontend.\n\n allResults = []\n addressFields = ['address', 'city', 'state', 'country']\n dataFields = ['name', 'description']\n\n scope = bucket.scope('inventory')\n hotel_collection = scope.collection('hotel')\n\n for hotel in searchRows:\n \n # The lookup will succeed even if the document does not contain all\n # fields. Attempting to read these none existent fields will result\n # in a 'DocumentNotFoundException'.\n\n hotelFields = hotel_collection.lookup_in(\n hotel.id, [SD.get(x) for x in [*addressFields, *dataFields]])\n\n # Concatenates the first 4 fields to form the address. \n\n hotelAddress = []\n for x in range(len(addressFields)):\n try:\n hotelAddress.append(hotelFields.content_as[str](x))\n except:\n pass\n hotelAddress = ', '.join(hotelAddress)\n\n # Extracts the other fields.\n\n hotelData = {}\n for x, field in enumerate(dataFields):\n try: \n hotelData[field] = hotelFields.content_as[str](x+len(addressFields))\n except:\n pass\n \n hotelData['address'] = hotelAddress\n allResults.append(hotelData)\n\n queryType = f\"FTS search - scoped to: {scope.name}.hotel within fields {','.join([*addressFields, *dataFields])}\"\n response = {'data': allResults, 'context': [queryType]}\n return jsonify(response)\n\n\ndef abortmsg(code, message):\n response = jsonify({'message': message})\n response.status_code = code\n return response\n\n\ndef convdate(rawdate):\n \"\"\"Returns integer data from mm/dd/YYYY\"\"\"\n day = datetime.strptime(rawdate, '%m/%d/%Y')\n return day.weekday()\n\n\ndef genToken(username):\n return jwt.encode({'user': username}, JWT_SECRET, algorithm='HS256').decode(\"ascii\")\n\n\ndef auth(bearerHeader, username):\n bearer = bearerHeader.split(\" \")[1]\n return username == jwt.decode(bearer, JWT_SECRET)['user']\n\n\ndef connect_db():\n print(CONNSTR, authenticator)\n cluster = Cluster(CONNSTR, ClusterOptions(authenticator))\n bucket = cluster.bucket('travel-sample')\n return cluster, bucket\n\n\nif __name__ == \"__main__\":\n cluster, bucket = connect_db()\n app.register_blueprint(api, url_prefix=\"/api\")\n swagger = Swagger(app, template=swagger_template)\n app.run(debug=True, host='0.0.0.0', port=8080, threaded=False)"
},
{
"alpha_fraction": 0.6453968286514282,
"alphanum_fraction": 0.6574603319168091,
"avg_line_length": 29.278846740722656,
"blob_id": "fea59b43cdf11ace13f7d6e39abb9cc5a466095f",
"content_id": "ce1ba8abfe8f67caee9ec1d9966152ee7e9308dd",
"detected_licenses": [
"MIT"
],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 3150,
"license_type": "permissive",
"max_line_length": 106,
"num_lines": 104,
"path": "/sdk_test.py",
"repo_name": "couchbaselabs/try-cb-python",
"src_encoding": "UTF-8",
"text": "from datetime import timedelta\n\n# needed for any cluster connection\nfrom couchbase.auth import PasswordAuthenticator\nfrom couchbase.cluster import Cluster\n# needed for options -- cluster, timeout, SQL++ (N1QL) query, etc.\nfrom couchbase.options import (ClusterOptions, ClusterTimeoutOptions)\nfrom couchbase.exceptions import CouchbaseException\nfrom couchbase.options import SearchOptions\n\nfrom couchbase.search import QueryStringQuery, SearchOptions\n\n# Update this to your cluster\n# endpoint = \"<connection string>\"\n# username = \"<username>\"\n# password = \"<password>\"\nbucket_name = \"travel-sample\"\n# User Input ends here.\n\n# Connect options - authentication\nauth = PasswordAuthenticator(username, password)\n\n# Connect options - global timeout opts\ntimeout_opts = ClusterTimeoutOptions(kv_timeout=timedelta(seconds=10))\n\n# get a reference to our cluster\ncluster = Cluster('couchbases://{}'.format(endpoint),\n ClusterOptions(auth, timeout_options=timeout_opts))\n\n# Wait until the cluster is ready for use.\ncluster.wait_until_ready(timedelta(seconds=5))\n\n# get a reference to our bucket\ncb = cluster.bucket(bucket_name)\n\ncb_coll = cb.scope(\"inventory\").collection(\"airport\")\n\n# json object to be used in example update_airport_name function\nupdated_airport_name = {\n \"airportname\": \"Heathrow Airport\",\n \"city\": \"London\",\n \"country\": \"United Kingdom\",\n \"faa\": \"LHR\",\n \"geo\": {\n \"alt\": 83,\n \"lat\": 51.4775,\n \"lon\": -0.461389\n },\n \"icao\": \"EGLL\",\n \"id\": 507,\n \"type\": \"airport\",\n \"tz\": \"Europe/London\"\n }\n\n# searches all airports\ndef get_all_airports():\n print(\"\\nLookup Result: \")\n try:\n sql_query = 'SELECT * FROM `travel-sample`.inventory.airport'\n row_iter = cluster.query(\n sql_query,)\n for row in row_iter:\n print(row['airport']['airportname'])\n except Exception as e:\n print(e)\n\n# updates airport\ndef update_airport_name(doc):\n print(\"\\nUpsert CAS: \")\n try:\n # key will equal: \"airport_507\"\n key = doc[\"type\"] + \"_\" + str(doc[\"id\"])\n result = cb_coll.upsert(key, doc)\n print(result.cas)\n except Exception as e:\n print(e)\n\n# searching a string with created FTS index called \"Example\"\ndef search_airport_inventory(search_string):\n try:\n result = cluster.search_query(\"Example\", QueryStringQuery(search_string), SearchOptions(limit=10))\n for row in result.rows():\n print(\"Found row: {}\".format(row))\n except CouchbaseException as e:\n print(\"Couchbase Error:\"+str(e))\n except Exception as ex:\n print(\"Error:\"+str(ex))\n\n# gets multiple airports using the airport keys detailed below\ndef get_multi_airports():\n keys = ['airport_507', 'airport_1254', 'airport_1255']\n res = cb_coll.get_multi(keys)\n \n for k, v in res.results.items():\n print(f'Doc {k} has value: {v.content_as[dict]}')\n\n# uncomment each and run file to see output\n# get_multi_airports()\n\n# search_airport_inventory(\"Heathrow\")\n\n# get_all_airports()\n\n# update_airport_name(updated_airport_name)\n\n"
},
{
"alpha_fraction": 0.4803493320941925,
"alphanum_fraction": 0.6986899375915527,
"avg_line_length": 15.428571701049805,
"blob_id": "2f458fd585ee08f86c99bd91c7762a649c042437",
"content_id": "5edb56962cbd8683137c4775afa8759223a1aa04",
"detected_licenses": [
"MIT"
],
"is_generated": false,
"is_vendor": false,
"language": "Text",
"length_bytes": 229,
"license_type": "permissive",
"max_line_length": 21,
"num_lines": 14,
"path": "/requirements.txt",
"repo_name": "couchbaselabs/try-cb-python",
"src_encoding": "UTF-8",
"text": "couchbase==4.1.3\nDateTime==4.1.1\nFlask==1.1.2\nFlask-Classy==0.6.10\nitsdangerous==0.24\nJinja2==2.11.3\nMarkupSafe==0.23\nPyJWT==1.4.2\npytz==2016.10\nsix>=1.10.0\nWerkzeug==1.0.1\nzope.interface>=5.2.0\nFlask-Cors>=3.0.10\nflasgger==0.9.5"
}
] | 5 |
Alexandra323/car_package
|
https://github.com/Alexandra323/car_package
|
7591f05cbb609524c5bff67df4a51180eaf6aa7e
|
afe0ed5b76dd8a5d9cec9f2c2a807e93187a3604
|
6ae8317c29942df2a4407e510ea165c3defa1efd
|
refs/heads/master
| 2020-12-27T08:06:06.855735 | 2020-02-02T23:00:32 | 2020-02-02T23:00:32 | 237,826,685 | 0 | 0 | null | null | null | null | null |
[
{
"alpha_fraction": 0.5411290526390076,
"alphanum_fraction": 0.5548387169837952,
"avg_line_length": 31.078947067260742,
"blob_id": "bcd1281e7106b867dd6df6af17a7898a37086af7",
"content_id": "1e4cf428186d5a931595f0bf8c476d78928baf07",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 1240,
"license_type": "no_license",
"max_line_length": 81,
"num_lines": 38,
"path": "/car_package/cars_module.py",
"repo_name": "Alexandra323/car_package",
"src_encoding": "UTF-8",
"text": "\nclass Car():\n\n def __init__(self, make, model, year):\n self.make = make\n self.model =model\n self.year = year\n self.odometr = 0\n\n def car_description(self):\n print(f\"Car info:{self.make},{self.model}, {self.year}\") \n\n def read_odometr(self):\n print(f\"your odometr shows: {self.odometr}\")\n\n def set_odometr(self, miles):\n if miles > self.odometr:\n self.odometr = miles\n else:\n print(f\"You can not set the odometr of less of {self.odometr}\")\n\nclass ElectricCar(Car):\n def __init__(self, make, model, year, battery =75):\n super().__init__(make, model, year)\n self.battery = battery\n\n def car_description(self):\n msg = f\"Car info:{self.make}, {self.model}, {self.year}, \\n\"\n msg +=f\"battery capacity: {self.battery} -kWh.\" \n print(msg)\n \n def get_range(self): \n if self.battery > 100:\n msg = \"This car has range of 320 miles fully charged battery.\"\n elif self.battery <80:\n msg = f\"This ca has range of 250 miles with full charge.\"\n else:\n msg = f\"THis car has range of around 290 miles with full charge.\" \n print(msg) \n"
},
{
"alpha_fraction": 0.6356589198112488,
"alphanum_fraction": 0.6356589198112488,
"avg_line_length": 25.386363983154297,
"blob_id": "e968a9560ef683c143bcf08303d266fccf4ec507",
"content_id": "b46ad36f70626b6e72972cd067fb1822c98f25a9",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 1161,
"license_type": "no_license",
"max_line_length": 77,
"num_lines": 44,
"path": "/classes.py",
"repo_name": "Alexandra323/car_package",
"src_encoding": "UTF-8",
"text": "# OBEJCT ORIENTED PROGRAMMING CONCEPTS (OOPs)\n\n# Class - blueprint\n# Object - is the instnace of the class\n\n# creating a class - names with capital letter\n\n\nclass Dog():\n # state\n # constructor - default functions to be execute when you create an object\n def __init__(self, breed, color, name):\n self.breed = breed\n self.color = color\n self.name = name\n\n def describeDog(self):\n return f\"your dog is {self.breed}.\"\n\n def run(self):\n return f\"Your dog {self.name} is running ...\"\n\n def bark(self):\n return f\"{self.name} is barking - Wouf wouf wouf wouf!!!!\"\n\n\n# instantiating the class - creating object of the class\nrex = Dog('german shepherd', 'brown', 'Rex')\nsharik = Dog('husky', 'black', 'Sharik')\n\n\n# access the class state and behavior\nprint(f\"the breed of the rex is {rex.breed}\")\nprint(f\"the color of the rex is {rex.color}\")\nprint(rex.describeDog())\nprint(rex.run())\nprint(rex.bark())\nprint(\"======================================\")\n\nprint(f\"the breed of the rex is {sharik.breed}\")\nprint(f\"the color of the rex is {sharik.color}\")\nprint(sharik.describeDog())\nprint(sharik.run())\nprint(sharik.bark())\n"
},
{
"alpha_fraction": 0.7035132646560669,
"alphanum_fraction": 0.7232219576835632,
"avg_line_length": 42.22222137451172,
"blob_id": "23910a5464e2bd9aac2e3850d2086368e01c3a71",
"content_id": "0c59954dd286459d44f4fc364d2c2e5d8ca79cf4",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Markdown",
"length_bytes": 2344,
"license_type": "no_license",
"max_line_length": 611,
"num_lines": 54,
"path": "/README.md",
"repo_name": "Alexandra323/car_package",
"src_encoding": "UTF-8",
"text": "# Week 4\nFunctions Continued, Modules, Packages, Classes\n\n### Chapter 8. Functions\nIn this chapter you’ll learn to write functions, which are named blocks of code that are designed to do one specific job. When you want to perform a particular task that you’ve defined in a function, you call the name of the function responsible for it. If you need to perform that task multiple times throughout your program, you don’t need to type all the code for the same task again and again; you just call the function dedicated to handling that task, and the call tells Python to run the code inside the function. You’ll find that using functions makes your programs easier to write, read, test, and fix.\n\n#### Defining a Function\nHere’s a simple function named greet_user() that prints a greeting:\n ```python\n def greet_user():\n \"\"\"Display a simple greeting.\"\"\"\n print(\"Hello!\")\n\ngreet_user()\n```\n\n#### Passing Information to a Function\nThe function now expects you to provide a value for username each time you call it. When you call greet_user(), you can pass it a name, such as 'jesse', inside the parentheses:\n\n```python\ndef greet_user(username):\n \"\"\"Display a simple greeting.\"\"\"\n print(f\"Hello, {username.title()}!\")\n\ngreet_user('jesse')\n```\n\n## Steps to clone the project \n1. Copy the url of the repository ending with .git (https://github.com/2019-Fall/week4.git)\n2. GitHub Desktop: \n * Go to Current Repository\n * click on Add drop down\n * Clone Repository\n * click on URL tab\n * paste the copied URL (https://github.com/2019-Fall/week4.git)\n * choose the location from your local machine `C:\\dev\\` then click on Clone.\n\n Git Bash: navigate to the right directory `C:\\dev\\` and enter following:\n ```bash\n git clone https://github.com/2019-Fall/week4.git\n ```\n\n 3. [optional] Create your feature branch: \n ```bash\n git checkout -b week4_john\n ```\n 4. Open the `C:\\dev\\week4` folder from your VS Code and start modifying the code.\n\n## References\n\n* [Python Documentation - Modules](https://docs.python.org/3/tutorial/modules.html)\n* [Socratica - Recursion](https://youtu.be/Qk0zUZW-U_M)\n* [Socratica - Classes and Objects](https://youtu.be/apACNr7DC_s)\n* [Python Crash Course](http://bedford-computing.co.uk/learning/wp-content/uploads/2015/10/No.Starch.Python.Oct_.2015.ISBN_.1593276036.pdf)\n"
},
{
"alpha_fraction": 0.5554584860801697,
"alphanum_fraction": 0.6026200652122498,
"avg_line_length": 22.367347717285156,
"blob_id": "36405756000a7ea64d534cc71981fc22765965f2",
"content_id": "8f1d32718dc1262cf611edbd10516843394761f6",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 1145,
"license_type": "no_license",
"max_line_length": 97,
"num_lines": 49,
"path": "/functions_recap.py",
"repo_name": "Alexandra323/car_package",
"src_encoding": "UTF-8",
"text": "# Functions recap\n\n# ALL MY FUNCTIONS HERE\n\n\ndef add(a, b):\n return a+b\n\n\ndef add2(a, b=0):\n \"\"\" a and b is an int, b has defaul value 0\"\"\"\n return a+b\n\n\ndef car_description(model, year, features):\n \"\"\" model:str, year:int, features: list, This functions describes the car based on inputs.\"\"\"\n print(f\"The car's model is {model.upper()}.\")\n print(f\"It was manufactured in {year}.\")\n print(f\"it has following features: \")\n for feature in features:\n print(f\"\\t{feature.title()}.\")\n\n\ndef cars_desc(car):\n print(add(5, 6))\n for field, values in car.items():\n for value in values:\n print(f\"Your car's {field} is {value}.\")\n\n\n# DATA\ncar_tesla = {'model': ['Model X', 'Model Y'],\n 'year': [2020, 2019],\n 'owner': ['Zakaria', 'Oleh']}\n\n\n# ALL MY EXECUTIONS ARE HERE\n\ncars_desc(car_tesla)\nresult1 = add(23, 45)\nresult2 = add(11, 22)\n# result3 = add(12)\nresult3 = add2(12, 65)\nresult4 = add2(12)\nprint(result1, result2, result3, result4)\nprint(add(46789, 12347))\n\ncar_description(\"Model Y\", 2019, [\n 'cool design', 'fancy doors', 'techy tires', 'ugly window'])\n"
}
] | 4 |
peanutbutterjeeelly/StackOverFlow_
|
https://github.com/peanutbutterjeeelly/StackOverFlow_
|
67b093c2ad0094bd4739adfc4d5cae291bf559d7
|
554320e6fd5546a1943638f542fec0f486405237
|
4ee18f0b39609f16c6083dca8755c8e59af630d2
|
refs/heads/main
| 2023-01-13T10:39:09.821197 | 2020-11-20T23:04:02 | 2020-11-20T23:04:02 | 314,679,513 | 0 | 0 | null | null | null | null | null |
[
{
"alpha_fraction": 0.38557806611061096,
"alphanum_fraction": 0.4404052495956421,
"avg_line_length": 35.4782600402832,
"blob_id": "4b80c6928e2ba5f8c0aa91413bfdef0de01293e4",
"content_id": "c666fa1766444d094296c98f75bf359caa6ead84",
"detected_licenses": [
"Apache-2.0"
],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 1678,
"license_type": "permissive",
"max_line_length": 73,
"num_lines": 46,
"path": "/exploit.py",
"repo_name": "peanutbutterjeeelly/StackOverFlow_",
"src_encoding": "UTF-8",
"text": "#!/usr/bin/python3\n\nimport sys\n\nshellcode= (\n \"\\x31\\xc0\"\n \"\\x31\\xdb\"\n \"\\xb0\\xd5\"\n \"\\xcd\\x80\"\n \"\\x31\\xc0\" # xorl %eax,%eax \n \"\\x50\" # pushl %eax \n \"\\x68\"\"//sh\" # pushl $0x68732f2f \n \"\\x68\"\"/bin\" # pushl $0x6e69622f \n \"\\x89\\xe3\" # movl %esp,%ebx \n \"\\x50\" # pushl %eax \n \"\\x53\" # pushl %ebx \n \"\\x89\\xe1\" # movl %esp,%ecx \n \"\\x99\" # cdq \n \"\\xb0\\x0b\" # movb $0x0b,%al \n \"\\xcd\\x80\" # int $0x80 \n \"\\x00\"\n).encode('latin-1')\n\n# Fill the content with NOP's\ncontent = bytearray(0x90 for i in range(517))\n\n#########################################################################\n# Replace 0 with the correct offset value\nD = 0\n# Fill the return address field with the address of the shellcode\n# Replace 0xFF with the correct value \ncontent[D+0] = 0xFF # fill in the 1st byte (least significant byte) \ncontent[D+1] = 0xFF # fill in the 2nd byte \ncontent[D+2] = 0xFF # fill in the 3rd byte\ncontent[D+3] = 0xFF # fill in the 4th byte (most significant byte)\n#########################################################################\n\n# Put the shellcode at the end\nstart = 517 - len(shellcode) \ncontent[start:] = shellcode \nret = 0xbfffea88 + 116\ncontent[36:40] = (ret).to_bytes(4,byteorder='little')\n# Write the content to badfile\nfile = open(\"badfile\", \"wb\")\nfile.write(content)\nfile.close()\n"
},
{
"alpha_fraction": 0.8387096524238586,
"alphanum_fraction": 0.8387096524238586,
"avg_line_length": 30,
"blob_id": "3e7e600de7cbdae75d926d0141ed27ad713b3ff1",
"content_id": "3f2c124ced087a5a21b33b6fd2b65a29d743fcaa",
"detected_licenses": [
"Apache-2.0"
],
"is_generated": false,
"is_vendor": false,
"language": "Markdown",
"length_bytes": 62,
"license_type": "permissive",
"max_line_length": 44,
"num_lines": 2,
"path": "/README.md",
"repo_name": "peanutbutterjeeelly/StackOverFlow_",
"src_encoding": "UTF-8",
"text": "# StackOverFlow_\nSome code and report regarding StackOverFlow\n"
}
] | 2 |
trytonus/hello-world
|
https://github.com/trytonus/hello-world
|
582a00159384d1e23cd230b5203931a1ff2dae40
|
933ea09a3102b6707570cf48bfab540299a4d4e1
|
ef97f988e5179c31a732fc5e714f39e2fe35a3f3
|
refs/heads/master
| 2020-03-11T17:50:57.312396 | 2018-04-19T04:24:21 | 2018-04-19T04:24:21 | 130,159,125 | 0 | 0 | null | null | null | null | null |
[
{
"alpha_fraction": 0.6569767594337463,
"alphanum_fraction": 0.6569767594337463,
"avg_line_length": 16.200000762939453,
"blob_id": "3d8628820d803db923b9ff9d2a17236c2ded1c5a",
"content_id": "8d231590b19d555355542660debaa8b14c634d35",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 172,
"license_type": "no_license",
"max_line_length": 42,
"num_lines": 10,
"path": "/__init__.py",
"repo_name": "trytonus/hello-world",
"src_encoding": "UTF-8",
"text": "from trytond.pool import Pool\n\nfrom .helloworld import HelloWorld\n\n\ndef register():\n Pool.register(\n HelloWorld,\n module='helloworld', type_='model'\n )\n"
},
{
"alpha_fraction": 0.703071653842926,
"alphanum_fraction": 0.703071653842926,
"avg_line_length": 25.636363983154297,
"blob_id": "a79b3711522ee670c15ed30983a627fad82c7d5a",
"content_id": "952cd76c30c6b1c8359f45ef00d6da9d1ebc4cd8",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 293,
"license_type": "no_license",
"max_line_length": 53,
"num_lines": 11,
"path": "/helloworld.py",
"repo_name": "trytonus/hello-world",
"src_encoding": "UTF-8",
"text": "from trytond.model import ModelView, ModelSQL, fields\nfrom trytond.pyson import Eval\nfrom trytond import backend\n\n__all__ = ['Country', 'Subdivision', 'Zip']\n\n\nclass HelloWorld(ModelSQL, ModelView):\n 'Hello World'\n __name__ = 'hello.world'\n name = fields.Char('Hello', required=True)\n"
}
] | 2 |
npredey/CMEBlockTrades
|
https://github.com/npredey/CMEBlockTrades
|
4bc2af70f2a8b75c8ab65e55072018677d11f5ce
|
b451a33a53d5aae9633ccd5719c1fad5af8ec903
|
a3493c784dd8e72ea4ced3506eb98e864673db83
|
refs/heads/master
| 2020-04-21T19:23:49.395612 | 2019-02-08T22:00:43 | 2019-02-08T22:00:43 | 169,804,560 | 0 | 0 | null | null | null | null | null |
[
{
"alpha_fraction": 0.7586776614189148,
"alphanum_fraction": 0.7652892470359802,
"avg_line_length": 39.33333206176758,
"blob_id": "884960d2543beb4070561b5f5a2e8d3dfb958de3",
"content_id": "6ec39a0a77ddcc2bd9f7784376480b5d20db9634",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 605,
"license_type": "no_license",
"max_line_length": 252,
"num_lines": 15,
"path": "/block_trades.py",
"repo_name": "npredey/CMEBlockTrades",
"src_encoding": "UTF-8",
"text": "import datetime\nimport requests\nimport pandas as pd\n\ncurrent_date = f\"{datetime.datetime.now():%m%d%Y}\"\ncme_url = \"https://www.cmegroup.com/CmeWS/mvc/xsltTransformer.do?xlstDoc=/XSLT/md/blocks-records.xsl&url=/da/BlockTradeQuotes/V1/Block/BlockTrades?exchange=XCBT,XCME,XCEC,DUMX,XNYM&foi=FUT,OPT,SPD&assetClassId=1,6&tradeDate={}&sortCol=time&sortBy=desc\"\ncme_url = cme_url.format(current_date)\n# print(cme_url)\n\nhtml_data = requests.get(cme_url)._content\ntables = pd.read_html(html_data)\nblock_trades_table = tables[0]\n# print(type(block_trades_table))\n\nprint(block_trades_table.to_string(index=False))\n"
},
{
"alpha_fraction": 0.8088235259056091,
"alphanum_fraction": 0.8088235259056091,
"avg_line_length": 67,
"blob_id": "0d36b23f53336fb4a7349fb9a5b2a61db94f081a",
"content_id": "e43b8756eb65d880805e1991a29a0e78de5e0cb5",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Markdown",
"length_bytes": 136,
"license_type": "no_license",
"max_line_length": 118,
"num_lines": 2,
"path": "/README.md",
"repo_name": "npredey/CMEBlockTrades",
"src_encoding": "UTF-8",
"text": "# CMEBlockTrades\nThis project provides code to get Block Trades from the CME and display them in a formatted table on the command line.\n"
}
] | 2 |
DumDereDum/kinfu_omz
|
https://github.com/DumDereDum/kinfu_omz
|
28330d2abc38d79154bfd54dc7994445bcf79c1f
|
40f9fc781a4ceb6ba0a4976edb45be84439de2a1
|
5a9dce029a82c955f81827659634514c715868c3
|
refs/heads/main
| 2023-04-14T12:01:01.606810 | 2021-04-21T07:13:21 | 2021-04-21T07:13:21 | 358,509,816 | 0 | 0 | null | null | null | null | null |
[
{
"alpha_fraction": 0.5972644090652466,
"alphanum_fraction": 0.6051671504974365,
"avg_line_length": 35.153846740722656,
"blob_id": "996f490b52dbf68d5181c1003f634bc08590c02e",
"content_id": "110f0a32702c89eac34f0d958116252225ee40c0",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 3290,
"license_type": "no_license",
"max_line_length": 126,
"num_lines": 91,
"path": "/midasnet_demo.py",
"repo_name": "DumDereDum/kinfu_omz",
"src_encoding": "UTF-8",
"text": "import sys\nfrom argparse import ArgumentParser\nfrom pathlib import Path\n\nimport cv2\nimport numpy as np\nimport logging as log\nfrom openvino.inference_engine import IECore\n\n\ndef getRGBlist(folder):\n f = open(folder + '\\\\rgb.txt', 'r')\n rgb = [folder + '\\\\' + s.replace('/', '\\\\') for s in f.read().split() if '.png' in s]\n return rgb\n\n\ndef midasnet_demo():\n # arguments\n parser = ArgumentParser()\n\n parser.add_argument(\n \"-m\", \"--model\", help=\"Required. Path to an .xml file with a trained model\", required=True, type=Path)\n parser.add_argument(\n \"-i\", \"--input\", help=\"Required. Path to a input image file\", required=True, type=str)\n parser.add_argument(\"-l\", \"--cpu_extension\",\n help=\"Optional. Required for CPU custom layers. Absolute MKLDNN (CPU)-targeted custom layers. \"\n \"Absolute path to a shared library with the kernels implementations\", type=str,\n default=None)\n parser.add_argument(\"-d\", \"--device\",\n help=\"Optional. Specify the target device to infer on; CPU, GPU, FPGA, HDDL or MYRIAD is acceptable. \"\n \"Sample will look for a suitable plugin for device specified. Default value is CPU\",\n default=\"CPU\", type=str)\n\n args = parser.parse_args()\n\n # logging\n log.basicConfig(format=\"[ %(levelname)s ] %(message)s\",\n level=log.INFO, stream=sys.stdout)\n\n log.info(\"creating inference engine\")\n ie = IECore()\n if args.cpu_extension and \"CPU\" in args.device:\n ie.add_extension(args.cpu_extension, \"CPU\")\n\n log.info(\"Loading network\")\n net = ie.read_network(args.model, args.model.with_suffix(\".bin\"))\n\n assert len(net.input_info) == 1, \"Sample supports only single input topologies\"\n assert len(net.outputs) == 1, \"Sample supports only single output topologies\"\n\n log.info(\"preparing input blobs\")\n input_blob = next(iter(net.input_info))\n out_blob = next(iter(net.outputs))\n net.batch_size = 1\n\n # read and pre-process input image\n _, _, height, width = net.input_info[input_blob].input_data.shape\n\n # loading model to the plugin\n log.info(\"loading model to the plugin\")\n exec_net = ie.load_network(network=net, device_name=args.device)\n rgb_list = getRGBlist(args.input)\n\n for s in rgb_list:\n print(s)\n image = cv2.imread(s, cv2.IMREAD_COLOR)\n cv2.imshow('input', image)\n (input_height, input_width) = image.shape[:-1]\n\n if (input_height, input_width) != (height, width):\n image = cv2.resize(image, (width, height), cv2.INTER_CUBIC)\n image = image.astype(np.float32)\n image = image.transpose((2, 0, 1))\n image_input = np.expand_dims(image, 0)\n res = exec_net.infer(inputs={input_blob: image_input})\n disp = np.squeeze(res[out_blob][0])\n disp = cv2.resize(disp, (input_width, input_height), cv2.INTER_CUBIC)\n\n disp_min = disp.min()\n disp_max = disp.max()\n\n if disp_max - disp_min > 1e-6:\n disp = (disp - disp_min) / (disp_max - disp_min)\n else:\n disp.fill(0.5)\n\n cv2.imshow('output', disp)\n cv2.waitKey(1)\n\nif __name__ == '__main__':\n midasnet_demo()\n"
},
{
"alpha_fraction": 0.5648648738861084,
"alphanum_fraction": 0.5783783793449402,
"avg_line_length": 24.227272033691406,
"blob_id": "045e62300584183555f3678798b54f7b7830c100",
"content_id": "13c2b1edf241efeeb9292f7859c973dde6eb2be7",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 1110,
"license_type": "no_license",
"max_line_length": 106,
"num_lines": 44,
"path": "/kinfu_demo.py",
"repo_name": "DumDereDum/kinfu_omz",
"src_encoding": "UTF-8",
"text": "import cv2\nimport numpy as np\n\nfrom argparse import ArgumentParser\n\ndef get_depth_list(folder):\n f = open(folder + '\\\\depth.txt', 'r')\n rgb = [folder + '\\\\' + s.replace('/', '\\\\') for s in f.read().split() if '.png' in s]\n return rgb\n\ndef kinfu_demo():\n parser = ArgumentParser()\n parser.add_argument(\n \"-i\", \"--input\", help=\"Required. Path to folder with a input image file\", required=True, type=str)\n\n args = parser.parse_args()\n print(\"Args: \", args)\n depth_list = get_depth_list(args.input)\n\n params = cv2.kinfu_Params.defaultParams()\n kf = cv2.kinfu_KinFu.create(params)\n\n for path in depth_list:\n image = cv2.imread(path, cv2.IMREAD_ANYDEPTH)\n\n (height, width) = image.shape[:]\n\n cv2.imshow('input', image)\n cv2.waitKey(1)\n\n size = height, width, 4\n cvt8 = np.zeros(size, dtype=np.uint8)\n\n flag = kf.update(image)\n if not flag:\n kf.reset()\n else:\n kf.render(cvt8)\n cv2.imshow('render', cvt8)\n\n\nif __name__ == '__main__':\n cv2.setUseOptimized(True)\n kinfu_demo()\n"
}
] | 2 |
mc51/cloud-clipboard
|
https://github.com/mc51/cloud-clipboard
|
1ec92054985f699f1db72889b1b4b40fa77414ba
|
39da175c6dc793b2929f73624c5c12682a71080e
|
d08bd6841f79668bd80a5691d9dc15932c4bee44
|
refs/heads/master
| 2022-11-21T20:56:01.105151 | 2020-07-20T19:58:00 | 2020-07-20T19:58:00 | 277,603,692 | 1 | 0 | null | 2020-07-06T17:16:02 | 2020-07-03T22:43:03 | 2017-09-10T19:43:19 | null |
[
{
"alpha_fraction": 0.5738242864608765,
"alphanum_fraction": 0.5833029747009277,
"avg_line_length": 25.375,
"blob_id": "2f236c91ea3beb7a6a5d91180016d37d2b931f7f",
"content_id": "c22dcb83302e0afa28309d21502945fee9ca4e10",
"detected_licenses": [
"MIT"
],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 2743,
"license_type": "permissive",
"max_line_length": 75,
"num_lines": 104,
"path": "/cloudcb.py",
"repo_name": "mc51/cloud-clipboard",
"src_encoding": "UTF-8",
"text": "#!/Usr/bin/env python3\n\n# This script acts as desktop client\n\nimport os\nimport sys\nimport time\nimport json\nimport subprocess\nimport requests\nimport pyperclip\n\n#server_url = \"https://cloudcb.herokuapp.com/\"\n# server_url = \"https://data-dive.com:9999/\"\nserver_url = \"http://localhost:8000/\"\nTIMEOUT = 2\n\ndef wait_for_copy():\n \"\"\"\n Wait until something is copied, i.e. clipboard content changes\n \"\"\"\n data = copy()\n print(f\"Current clipboard: {data}\\n\")\n while True:\n time.sleep(TIMEOUT)\n data_new = copy()\n if data_new != data:\n # Todo -> RegEx check for only whitespaces, i.e. empty strings\n print(f\"Got new clipboard content: {data_new}\\n\")\n break\n else:\n print(\"Waiting for new copy action...\", sep=\"\", end=\"\\r\")\n\ndef copy():\n \"\"\"\n Returns the current text on clipboard.\n \"\"\"\n data = pyperclip.paste()\n return data\n\ndef upload(username, password):\n \"\"\"\n Sends the copied text to server.\n \"\"\"\n payload = {\"text\": copy(), \"device\": \"\"}\n res = requests.post(\n server_url+\"copy-paste/\",\n data = payload,\n auth = (username, password)\n )\n if res.status_code == 200:\n print(\"Succeses! Copied to Cloud-Clipboard.\")\n else:\n print(\"Error: \", res.text)\n\ndef paste(data):\n \"\"\"\n Copies 'data' to local clipboard which enables pasting.\n \"\"\"\n pyperclip.copy(data)\n\ndef download(username, password):\n \"\"\"\n Downloads from server and updates the local clipboard.\n \"\"\"\n res = requests.get(server_url+\"copy-paste/\", auth=(username, password))\n if res.status_code == 200:\n paste(json.loads(res.text)[\"text\"])\n else:\n print(\"Cannot download the data.\")\n\ndef register(username, password):\n \"\"\"\n To let user register.\n \"\"\"\n payload = {\"username\": username, \"password\": password}\n res = requests.post(server_url+\"register/\", data=payload)\n if res.status_code == 201:\n print(\"Hi %s! You are all set.\" % username)\n else:\n print(\"Error: \", res.text)\n\ndef usage():\n print(\"Error: Unknown argument\")\n print(\"Usage: cloudcb.py copy|paste|register|wait <email> <password>\")\n\n\nif __name__ == \"__main__\":\n #print(\"Cloud Clipboard -- Share you clipboard accross the devices.\")\n if len(sys.argv) == 4:\n username = sys.argv[2]\n password = sys.argv[3]\n if sys.argv[1] == \"copy\":\n upload(username, password)\n elif sys.argv[1] == \"paste\":\n download(username, password)\n elif sys.argv[1] == \"register\":\n register(username, password)\n elif sys.argv[1] == \"wait\":\n wait_for_copy()\n else:\n usage()\n else:\n usage()\n"
},
{
"alpha_fraction": 0.7757009267807007,
"alphanum_fraction": 0.7757009267807007,
"avg_line_length": 26,
"blob_id": "6f54db9806e42c6dad7ad9538d605c88f16140ef",
"content_id": "cc8f25883bc5dc49b1251170b6ac296ebf63c814",
"detected_licenses": [
"MIT"
],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 107,
"license_type": "permissive",
"max_line_length": 44,
"num_lines": 4,
"path": "/server/clipboard/admin.py",
"repo_name": "mc51/cloud-clipboard",
"src_encoding": "UTF-8",
"text": "from django.contrib import admin\n\n# Register your models here.\n# TODO Add Clip here so we can edit as admin"
},
{
"alpha_fraction": 0.6843170523643494,
"alphanum_fraction": 0.6934232711791992,
"avg_line_length": 33.08045959472656,
"blob_id": "7ec9fcaf92bccae7245d9f3b155011265953dff1",
"content_id": "211cfc3b4ac4fad4cc8204008201f2f5ce338366",
"detected_licenses": [
"MIT"
],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 2965,
"license_type": "permissive",
"max_line_length": 92,
"num_lines": 87,
"path": "/server/clipboard/views.py",
"repo_name": "mc51/cloud-clipboard",
"src_encoding": "UTF-8",
"text": "from django.shortcuts import render\nfrom django.http import Http404\nfrom clipboard.models import Clip\nfrom clipboard.serializers import ClipSerializer, UserSerializer\nfrom clipboard.permissions import IsOwnerOrReadOnly\nfrom rest_framework import status\nfrom rest_framework.response import Response\nfrom rest_framework.views import APIView\nfrom rest_framework import permissions\nfrom rest_framework.throttling import AnonRateThrottle\nfrom django.contrib.auth.password_validation import validate_password, ValidationError\n\n# ToDos: Rate Limits per user and total for login / register\n\nclass ListClip(APIView):\n \"\"\"\n List the most recently copied texts. I am calling them 'clips'.\n \"\"\"\n def get(self, request):\n # Only show own clips\n clips = Clip.objects.filter(user=self.request.user)\n serializer = ClipSerializer(clips, many=True)\n return Response(serializer.data)\n\n\n def post(self, request):\n serializer = ClipSerializer(data=request.data)\n if serializer.is_valid():\n serializer.save(user=self.request.user) # explicitly specifying user\n return Response(serializer.data, status=status.HTTP_201_CREATED)\n return Response(serializer.errors, status=status.HTTP_400_BAD_REQUEST)\n\n permission_classes = (permissions.IsAuthenticated, IsOwnerOrReadOnly)\n\n\nclass CopyPaste(APIView):\n \"\"\"\n Update and retrieve the data.\n \"\"\"\n def get_clip(self, user):\n try:\n return Clip.objects.get(user=user)\n except Clip.DoesNotExist:\n raise Http404\n\n def post(self, request):\n clip = self.get_clip(request.user)\n serializer = ClipSerializer(clip, data=request.data)\n if serializer.is_valid():\n serializer.save()\n return Response(serializer.data)\n return Response(serializer.errors, status=status.HTTP_400_BAD_REQUEST)\n\n def get(self, request):\n clip = self.get_clip(request.user)\n serializer = ClipSerializer(clip)\n return Response(serializer.data)\n\n permission_classes = (permissions.IsAuthenticatedOrReadOnly, IsOwnerOrReadOnly)\n\n\nclass UserRegister(APIView):\n \"\"\"\n To register new users.\n \"\"\"\n\n throttle_classes = (AnonRateThrottle,)\n\n def post(self, request):\n try:\n validate_password(request.data['password'])\n except ValidationError as e:\n return Response(e, status=status.HTTP_400_BAD_REQUEST)\n else:\n serializer = UserSerializer(data=request.data)\n if serializer.is_valid():\n serializer.save()\n return Response(serializer.data['username'], status=status.HTTP_201_CREATED)\n return Response(serializer.errors, status=status.HTTP_400_BAD_REQUEST)\n\n\nclass UserVerify(APIView):\n\n def get(self, request):\n return Response(status=status.HTTP_200_OK)\n\n permission_classes = (permissions.IsAuthenticatedOrReadOnly, IsOwnerOrReadOnly)\n"
}
] | 3 |
christinefaye/scripts
|
https://github.com/christinefaye/scripts
|
0035a26a4414f8c3dbccf1d87277cb0cd4b262ba
|
80c34e36446b25e436292870dbf6843155a35d86
|
75b9fcb3d392d3d85117abfa7d30a0079298753c
|
refs/heads/master
| 2021-01-10T13:08:16.453835 | 2015-11-20T20:05:12 | 2015-11-20T20:05:12 | 46,577,492 | 0 | 0 | null | null | null | null | null |
[
{
"alpha_fraction": 0.63670414686203,
"alphanum_fraction": 0.63670414686203,
"avg_line_length": 23.121212005615234,
"blob_id": "7b630d63aef8ea2851650739ef50f6df8525948e",
"content_id": "e5e2d77bad016df1f349e1679928c1ea5272f623",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 801,
"license_type": "no_license",
"max_line_length": 67,
"num_lines": 33,
"path": "/tools/commonMod/cmConnect.py",
"repo_name": "christinefaye/scripts",
"src_encoding": "UTF-8",
"text": "#!/usr/bin/python\n\n#TODO:\n\n'''\nModule to connect to a host\n\n'''\n\nimport paramiko\n\nclass cConnect:\n ''' Connection to host class '''\n # user\t: string for host username\n # passwd\t: string for host password\n # ip\t\t: string for host ip\n def __init__(self,user,passwd,ip):\n assert user and passwd and ip\n self.usr = user\t\t# Host username\n self.pwd = passwd\t\t# Host password\n self.ip = ip\t\t# Host ip address\n\n def ssh(self):\n ''' SSH connection to host '''\n self.ssh = paramiko.SSHClient()\n self.ssh.set_missing_host_key_policy(paramiko.AutoAddPolicy())\n self.ssh.connect(self.ip,username=self.usr,password=self.pwd)\n stdin,stdout,sterr=self.ssh.exec_command(\"uptime\")\n print type(stdin)\n print stdout.readlines()\n\n \n#----------END CLASS-------------#\n\n\n\n\n\n"
},
{
"alpha_fraction": 0.6238859295845032,
"alphanum_fraction": 0.6274510025978088,
"avg_line_length": 22.76595687866211,
"blob_id": "6d75bf628f42b7bef79878067ace53d310f3f6e7",
"content_id": "654b5263f4e62211f20e01a84ce4907d047ca824",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 1122,
"license_type": "no_license",
"max_line_length": 51,
"num_lines": 47,
"path": "/tools/commonMod/cmHost.py",
"repo_name": "christinefaye/scripts",
"src_encoding": "UTF-8",
"text": "#!/usr/bin/python\n\nimport socket\nfrom cmConnect import cConnect\n'''\nModule to describe host properties\n\n'''\n\n#TODO:\n # verify ip\n\n\nclass cHost:\n ''' Remote host class '''\n # user\t: string for host username\n # passwd\t: string for host password\n # ip\t\t: string for host aipv4 address\n # url\t\t: string for host uniform resource locator\n def __init__(self,user,passwd,ip=None,url=None):\n self.usr = user\t\t# Host username\n self.pwd = passwd\t\t# Host password\n self.ip = ip\t\t# Host ip address\n self.url = url.strip()\t# Host url\n self.cxn = None\t\t# a cConnect object\n if not self.ip:\n self.urlToIp()\n if self.validateIPv4():\n assert self.ip\n assert self.usr and self.pwd and self.ip\n\n def urlToIp(self):\n ''' Convert URL to IP'''\n self.ip=socket.gethostbyname(self.url)\n\n def validateIPv4(self):\n ''' Check if IPv4 address is valid '''\n if socket.inet_aton(self.ip):\n return True\n return None\n\n def createCxn(self):\n ''' Create a connection '''\n self.cxn = cConnect(self.usr,self.pwd,self.ip)\n self.cxn.ssh()\n\n#----------END CLASS-------------#\n\n\n\n\n\n"
},
{
"alpha_fraction": 0.6561514139175415,
"alphanum_fraction": 0.659305989742279,
"avg_line_length": 16.55555534362793,
"blob_id": "4b09422df08681aed88134c0759807dd22e34b97",
"content_id": "9dbbcc77af18cb25bb5cca44fc881732836d9240",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 317,
"license_type": "no_license",
"max_line_length": 61,
"num_lines": 18,
"path": "/tools/commonTools/overTheWire/bandit.py",
"repo_name": "christinefaye/scripts",
"src_encoding": "UTF-8",
"text": "#!/usr/bin/python\n\nimport sys\nsys.path.append(\"/home/christine/st/scripts/tools/commonMod\")\nfrom cmHost import cHost\n\ndef main():\n url='bandit.labs.overthewire.org'\n usr='bandit0'\n pw='bandit0'\n\n # Bandit host\n b = cHost(usr,pw,url=url)\n # Connect to host\n b.createCxn()\n\nif __name__==\"__main__\":\n main()\n\n"
}
] | 3 |
sokoloveav/pose-classification-kit
|
https://github.com/sokoloveav/pose-classification-kit
|
143c6912f20452d92256951bfd6dc8b1f78e4849
|
68b01ddc52a596ae5f7fadc1060192f40e643dde
|
995e28cd0501add2cef570562fa6f63be86e1488
|
refs/heads/master
| 2023-07-08T01:43:30.088415 | 2021-08-16T19:36:02 | 2021-08-16T19:36:02 | null | 0 | 0 | null | null | null | null | null |
[
{
"alpha_fraction": 0.5110564827919006,
"alphanum_fraction": 0.5436995625495911,
"avg_line_length": 31.0112361907959,
"blob_id": "e76b96f5150b6427cb4952c462d591d9ab3d9ea6",
"content_id": "c4f4ac2256d6f09c363885451d69f103c6ec0864",
"detected_licenses": [
"MIT"
],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 2849,
"license_type": "permissive",
"max_line_length": 86,
"num_lines": 89,
"path": "/pose_classification_kit/src/keypoints_analysis/dynamic_bar_graph_widget.py",
"repo_name": "sokoloveav/pose-classification-kit",
"src_encoding": "UTF-8",
"text": "import numpy as np\nfrom ..imports.qt import QtWidgets\n\nfrom matplotlib.backends.backend_qt5agg import FigureCanvas\nfrom matplotlib import figure, patches, path\n\n\nclass BarGraphWidget(QtWidgets.QWidget):\n def __init__(self):\n super().__init__()\n layout = QtWidgets.QVBoxLayout(self)\n self.fig = figure.Figure(figsize=(5, 5))\n self.fig.subplots_adjust(bottom=0.0, top=0.975, left=0.0, right=1.0)\n self.canvas = FigureCanvas(self.fig)\n\n layout.addWidget(self.canvas)\n self.nbrCategories = 0\n self.offset_nullValue = 0.01\n self.ax = self.canvas.figure.subplots()\n self.ax.set_xlim(0.0, 1.0)\n self.ax.set_ylim(0.0, 1.0)\n self.ax.axis(\"off\")\n self.changeCategories([])\n self.updateValues(np.random.rand(self.nbrCategories))\n\n def changeCategories(self, categories: int):\n self.clear()\n self.nbrCategories = len(categories)\n if self.nbrCategories == 0:\n bottom = 0\n top = 0\n left = 0\n right = self.offset_nullValue\n nrects = 0\n\n else:\n bins = np.array(\n [float(i) / self.nbrCategories for i in range(self.nbrCategories + 1)]\n )\n\n bottom = bins[:-1] + (0.1 / self.nbrCategories)\n top = bins[1:] - (0.1 / self.nbrCategories)\n left = np.zeros(len(top))\n right = left + self.offset_nullValue\n nrects = len(top)\n\n nverts = nrects * (1 + 3 + 1)\n self.verts = np.zeros((nverts, 2))\n codes = np.full(nverts, path.Path.LINETO)\n codes[0::5] = path.Path.MOVETO\n codes[4::5] = path.Path.CLOSEPOLY\n self.verts[0::5, 0] = left\n self.verts[0::5, 1] = bottom\n self.verts[1::5, 0] = left\n self.verts[1::5, 1] = top\n self.verts[2::5, 0] = right\n self.verts[2::5, 1] = top\n self.verts[3::5, 0] = right\n self.verts[3::5, 1] = bottom\n\n patch = None\n\n barpath = path.Path(self.verts, codes)\n patch = patches.PathPatch(barpath, facecolor=\"#9500ff\", alpha=0.5)\n self.ax.add_patch(patch)\n\n # Add category names\n font = {\n \"family\": \"serif\",\n \"color\": \"#454545\",\n \"weight\": \"normal\",\n \"fontsize\": \"large\",\n \"fontname\": \"DejaVu Sans\",\n }\n\n for i, cat in enumerate(categories):\n posy = (bottom[i] * 2 + top[i]) / 3.0\n self.ax.text(0.05, posy, cat.replace(\"_\", \" \"), fontdict=font)\n\n self.ax.axis(\"off\")\n self.canvas.draw()\n\n def updateValues(self, values: np.ndarray):\n self.verts[2::5, 0] = values + self.offset_nullValue\n self.verts[3::5, 0] = values + self.offset_nullValue\n self.canvas.draw()\n\n def clear(self):\n self.ax.clear()\n"
},
{
"alpha_fraction": 0.5960784554481506,
"alphanum_fraction": 0.5970588326454163,
"avg_line_length": 30.875,
"blob_id": "57408bd5fe8728cd2cb970b6f72f31e8dfe7d458",
"content_id": "8158e4cb845a2a67491120ae6c46b22d64d5b1b1",
"detected_licenses": [
"MIT"
],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 1020,
"license_type": "permissive",
"max_line_length": 86,
"num_lines": 32,
"path": "/pose_classification_kit/src/imports/tensorflow.py",
"repo_name": "sokoloveav/pose-classification-kit",
"src_encoding": "UTF-8",
"text": "import os\n\nSHOW_TF_WARNINGS = False\nif not SHOW_TF_WARNINGS:\n os.environ[\"TF_CPP_MIN_LOG_LEVEL\"] = \"3\" # Avoid annoying tf warnings\n\ntry:\n import tensorflow as tf\n\n GPU_LIST = tf.config.experimental.list_physical_devices(\"GPU\")\n if GPU_LIST:\n try:\n # Currently, memory growth needs to be the same across GPUs\n for gpu in GPU_LIST:\n # Prevent Tensorflow to take all GPU memory\n tf.config.experimental.set_memory_growth(gpu, True)\n logical_gpus = tf.config.experimental.list_logical_devices(\"GPU\")\n print(\n len(GPU_LIST), \"Physical GPUs,\", len(logical_gpus), \"Logical GPUs\"\n )\n except RuntimeError as e:\n # Memory growth must be set before GPUs have been initialized\n print(e)\n TF_LOADED = True\nexcept:\n TF_LOADED = False\n\nTF_STATUS_STR = (\n (\"TensorFlow running ({} GPU)\".format(len(GPU_LIST)))\n if TF_LOADED\n else \"TensorFlow not found.\"\n)\n"
},
{
"alpha_fraction": 0.6119800209999084,
"alphanum_fraction": 0.6171380877494812,
"avg_line_length": 41.92856979370117,
"blob_id": "418e4c21282c8b7f5a1804d6555c94c5953f85a4",
"content_id": "77b6536c221db78d8208872b7a074efca7222067",
"detected_licenses": [
"MIT"
],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 6010,
"license_type": "permissive",
"max_line_length": 134,
"num_lines": 140,
"path": "/pose_classification_kit/datasets/data_augmentation.py",
"repo_name": "sokoloveav/pose-classification-kit",
"src_encoding": "UTF-8",
"text": "import numpy as np\n\n\ndef dataAugmentation(\n x: np.ndarray,\n y: np.ndarray,\n augmentation_ratio: float = 0.5,\n remove_specific_keypoints: list = None,\n remove_rand_keypoints_nbr: int = None,\n random_noise_standard_deviation: float = None,\n scaling_factor: float = None,\n rotation_angle: float = None,\n scaling_factor_standard_deviation: float = None,\n rotation_angle_standard_deviation: float = None,\n):\n\n \"\"\"This function adds entries in the dataset by applying several data augmentation techniques\n depending on the arguments that are given.\n\n Args:\n x (np.ndarray): Dataset of entries for the neural network, works with either BODY25 or BODY18\n y (np.ndarray): Labels, one per entry\n augmentation_ratio (float, optional): The given float will be proportion of entries of the dataset that will be created by the\n data augmentation function. Defaults to .5.\n remove_specific_keypoints (list, optional): Remove keypoints indicated in the given list. Defaults to None.\n remove_rand_keypoints_nbr (int, optional): Remove the given number of keypoints randomly for each entry. Defaults to None.\n random_noise_standard_deviation (float, optional): Add noise for each keypoint following a normal distribution of\n the given standard deviation. Defaults to None.\n scaling_factor (float, optional): Scale every keypoint by the given scaling factor. Defaults to None.\n rotation_angle (float, optional): Rotate every keypoint by the given rotating angle. Defaults to None.\n scaling_factor_standard_deviation (float, optional): Scale each keypoint by a different scaling factor\n generated by a normal distribution of the given standard deviation. Defaults to None.\n rotation_angle_standard_deviation (float, optional): Rotate each keypoint by a different rotation angle\n generated by a normal distribution of the given standard deviation. Defaults to None.\n\n Returns:\n tuple(np.ndarray, np.ndarray): returns all the created entries and the labels associated\n \"\"\"\n\n size_dataset, number_keypoints, *_ = x.shape\n\n # Number of entries that will be created\n number_entries_to_create = size_dataset * augmentation_ratio\n\n # Where is stored newly created entries\n new_x = []\n new_y = []\n\n # Shuffle the entries\n shuffler = np.random.permutation(size_dataset)\n x = x[shuffler]\n y = y[shuffler]\n\n index_dataset = 0\n\n # Go through each entry one by one\n while number_entries_to_create != 0:\n\n entry = []\n\n # The scaling factor that will be used for this entry\n if type(scaling_factor_standard_deviation) != type(None):\n scaling_factor_random = np.random.normal(\n 1, scaling_factor_standard_deviation\n )\n\n # The rotation angle that will be used for this entry\n if type(rotation_angle_standard_deviation) != type(None):\n rotation_angle_random = np.random.normal(\n 0, rotation_angle_standard_deviation\n )\n\n # The loist of keypoints that will be removed for this entry\n if type(remove_rand_keypoints_nbr) != type(None):\n list_random_keypoints = [\n np.random.randint(0, number_keypoints)\n for i in range(remove_rand_keypoints_nbr)\n ]\n\n # Go through the keypoints of the entry\n for i in range(number_keypoints):\n keypoint_x = x[index_dataset][i][0]\n keypoint_y = x[index_dataset][i][1]\n\n # Apply normal noise\n if type(random_noise_standard_deviation) != type(None):\n keypoint_x += np.random.normal(0, random_noise_standard_deviation)\n keypoint_y += np.random.normal(0, random_noise_standard_deviation)\n\n # Apply the scaling faction\n if type(scaling_factor) != type(None):\n keypoint_x *= scaling_factor\n keypoint_y *= scaling_factor\n if type(scaling_factor_standard_deviation) != type(None):\n keypoint_x *= scaling_factor_random\n keypoint_y *= scaling_factor_random\n\n # Apply the rotation\n if type(rotation_angle) != type(None):\n theta = np.radians(rotation_angle)\n c, s = np.cos(theta), np.sin(theta)\n rotation_matrix = np.array(((c, -s), (s, c)))\n keypoint = np.array([keypoint_x, keypoint_y])\n rotated_keypoint = np.dot(rotation_matrix, keypoint)\n keypoint_x = rotated_keypoint[0]\n keypoint_y = rotated_keypoint[1]\n if type(rotation_angle_standard_deviation) != type(None):\n theta = np.radians(rotation_angle_random)\n c, s = np.cos(theta), np.sin(theta)\n rotation_matrix = np.array(((c, -s), (s, c)))\n keypoint = np.array([keypoint_x, keypoint_y])\n rotated_keypoint = np.dot(rotation_matrix, keypoint)\n keypoint_x = rotated_keypoint[0]\n keypoint_y = rotated_keypoint[1]\n\n # Remove the points\n if type(remove_rand_keypoints_nbr) != type(None):\n if i in list_random_keypoints:\n keypoint_x = 0.0\n keypoint_y = 0.0\n if type(remove_specific_keypoints) != type(None):\n if i in remove_specific_keypoints:\n keypoint_x = 0.0\n keypoint_y = 0.0\n # Add additionnal augmentation features\n entry.append([keypoint_x, keypoint_y])\n\n new_x.append(entry)\n new_y.append(y[index_dataset])\n\n # If the augmentation_ratio is more than 1, after going through the whole\n # dataset, it will start over\n index_dataset = (index_dataset + 1) % size_dataset\n\n number_entries_to_create -= 1\n\n new_x = np.array(new_x)\n new_y = np.array(new_y)\n\n return (new_x, new_y)\n"
},
{
"alpha_fraction": 0.5806451439857483,
"alphanum_fraction": 0.6474654674530029,
"avg_line_length": 35.16666793823242,
"blob_id": "bf079ab18a8735e4a67b28b24f7dac2192b3ec97",
"content_id": "133bc4989b34a76472ce7a8af4e420bd620bf71f",
"detected_licenses": [
"MIT"
],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 1736,
"license_type": "permissive",
"max_line_length": 119,
"num_lines": 48,
"path": "/pose_classification_kit/models/__init__.py",
"repo_name": "sokoloveav/pose-classification-kit",
"src_encoding": "UTF-8",
"text": "from ..config import MODELS_PATH\nfrom ..src.imports.tensorflow import tf\n\n# fmt: off\navailableModelsPath = {\n \"9class_3x64_body18\": MODELS_PATH / \"Body\" / \"9Classes_3x64_BODY18\" / \"9Classes_3x64_body18.h5\",\n \"9class_3x64_body25\": MODELS_PATH / \"Body\" / \"9Classes_3x64_BODY25\" / \"9Classes_3x64_body25.h5\",\n \"24class_2x128_rightHand\": MODELS_PATH / \"Hands\" / \"24Output-2x128-17epochs\" / \"24Output-2x128-17epochs_right.h5\",\n \"24class_2x128_leftHand\": MODELS_PATH / \"Hands\" / \"24Output-2x128-17epochs\" / \"24Output-2x128-17epochs_left.h5\",\n \"27class_3x64_rightHand\": MODELS_PATH / \"Hands\" / \"27Class_3x64\" / \"27Class_3x64_right.h5\",\n \"27class_3x64_leftHand\": MODELS_PATH / \"Hands\" / \"27Class_3x64\" / \"27Class_3x64_left.h5\",\n}\n# fmt: on\n\nAVAILABLE_MODELS = list(availableModelsPath.keys())\n\nfor modelName, modelPath in availableModelsPath.items():\n if not modelPath.is_file():\n print(modelName, \"missing at\", modelPath)\n\n\ndef getModel(modelName: str):\n \"\"\"Load pre-trained available model -- see AVAILABLE_MODELS.\n\n Args:\n modelName (str): Name of the model to load.\n\n Returns:\n (tf.keras.Model, List[str]): Pre-trained Keras model and list of output class labels.\n \"\"\"\n model = None\n labels = None\n\n if modelName in availableModelsPath:\n modelPath = availableModelsPath[modelName]\n model = tf.keras.models.load_model(modelPath)\n\n classPath = modelPath.parent / \"class.txt\"\n\n if classPath.is_file():\n with open(classPath, \"r\") as file:\n labels = file.readline().split(\",\")\n else:\n print(\"No class file available.\")\n else:\n print(\"Model not found.\")\n\n return model, labels\n"
},
{
"alpha_fraction": 0.6279676556587219,
"alphanum_fraction": 0.6389061808586121,
"avg_line_length": 34.75555419921875,
"blob_id": "b8c7374333bcec0733b9dc6b9205aaafd0152204",
"content_id": "9f4211b56b811295817d58db73d5277ebaa0d5fd",
"detected_licenses": [
"MIT"
],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 8045,
"license_type": "permissive",
"max_line_length": 148,
"num_lines": 225,
"path": "/pose_classification_kit/datasets/__init__.py",
"repo_name": "sokoloveav/pose-classification-kit",
"src_encoding": "UTF-8",
"text": "import pandas as pd\nimport numpy as np\n\nfrom ..config import DATASETS_PATH\nfrom .body_models import BodyModel, BODY25, BODY18, BODY25_to_BODY18_indices\nfrom .data_augmentation import dataAugmentation\n\n\ndef get_one_hot(targets: np.ndarray, nb_classes: int):\n res = np.eye(nb_classes)[np.array(targets).reshape(-1)]\n return res.reshape(list(targets.shape) + [nb_classes])\n\n\ndef importBodyCSVDataset(testSplit: float, local_import: bool):\n \"\"\"Import body dataset as numpy arrays from GitHub if available, or local dataset otherwise.\n\n Args:\n testSplit (float, optional): Percentage of the dataset reserved for testing. Defaults to 0.15. Must be between 0.0 and 1.0.\n \"\"\"\n assert 0.0 <= testSplit <= 1.0\n\n datasetPath = DATASETS_PATH / \"BodyPose_Dataset.csv\"\n datasetURL = \"https://raw.githubusercontent.com/ArthurFDLR/pose-classification-kit/master/pose_classification_kit/datasets/BodyPose_Dataset.csv\"\n\n if local_import:\n dataset_df = pd.read_csv(datasetPath)\n else:\n dataset_df = pd.read_csv(datasetURL)\n\n bodyLabels_df = dataset_df.groupby(\"label\")\n labels = list(dataset_df.label.unique())\n\n # Find the minimum number of samples accross categories to uniformly distributed sample sets\n total_size_cat = bodyLabels_df.size().min()\n test_size_cat = int(total_size_cat * testSplit)\n train_size_cat = total_size_cat - test_size_cat\n\n x_train = []\n x_test = []\n y_train = []\n y_test = []\n\n # Iterate over each labeled group\n for label, group in bodyLabels_df:\n # remove irrelevant columns\n group_array = group.drop([\"label\", \"accuracy\"], axis=1).to_numpy()\n np.random.shuffle(group_array)\n\n group_array_2D = [np.array((x[::2], x[1::2])).T for x in group_array]\n\n x_train.append(group_array_2D[:train_size_cat])\n y_train.append([label] * train_size_cat)\n x_test.append(group_array_2D[train_size_cat : train_size_cat + test_size_cat])\n y_test.append([label] * test_size_cat)\n\n # Concatenate sample sets as numpy arrays\n x_train = np.concatenate(x_train, axis=0)\n x_test = np.concatenate(x_test, axis=0)\n y_train = np.concatenate(y_train, axis=0)\n y_test = np.concatenate(y_test, axis=0)\n\n return x_train, x_test, y_train, y_test, labels\n\n\ndef bodyDataset(\n testSplit: float = 0.15,\n shuffle: bool = True,\n bodyModel: BodyModel = BODY25,\n local_import: bool = False,\n):\n \"\"\"Return the dataset of body keypoints (see pose_classification_kit/datasets/BodyPose_Dataset.csv)\n as numpy arrays.\n\n Args:\n testSplit (float, optional): Percentage of the dataset reserved for testing. Defaults to 0.15. Must be between 0.0 and 1.0.\n shuffle (bool, optional): Shuffle the whole dataset. Defaults to True.\n bodyModel (BodyModel, optional): Select the keypoint format of the dataset. BODY25 or BODY18. Defaults to BODY25.\n local_import (bool, optional): Choose to use local dataset or fetch online dataset (global repository). Default False.\n\n Returns:\n dict: {\n 'x_train': training keypoints,\n 'y_train': training labels,\n 'y_train_onehot': training labels one-hot encoded,\n 'x_test': testing keypoints,\n 'y_test': testing labels,\n 'y_test_onehot': testing labels one-hot encoded,\n 'labels': list of labels\n }\n \"\"\"\n\n x_train, x_test, y_train, y_test, labels = importBodyCSVDataset(\n testSplit, local_import\n )\n\n # Shuffle in unison\n if shuffle:\n shuffler_train = np.random.permutation(x_train.shape[0])\n shuffler_test = np.random.permutation(x_test.shape[0])\n x_train = x_train[shuffler_train]\n x_test = x_test[shuffler_test]\n y_train = y_train[shuffler_train]\n y_test = y_test[shuffler_test]\n\n # Format to requested body model\n assert bodyModel in [BODY18, BODY25]\n if bodyModel == BODY18:\n x_train = x_train[:, BODY25_to_BODY18_indices]\n x_test = x_test[:, BODY25_to_BODY18_indices]\n\n # One-hot encoding\n y_train_onehot = get_one_hot(\n np.array([labels.index(sample) for sample in y_train]), len(labels)\n )\n y_test_onehot = get_one_hot(\n np.array([labels.index(sample) for sample in y_test]), len(labels)\n )\n\n return {\n \"x_train\": x_train,\n \"y_train\": y_train,\n \"y_train_onehot\": y_train_onehot,\n \"x_test\": x_test,\n \"y_test\": y_test,\n \"y_test_onehot\": y_test_onehot,\n \"labels\": np.array(labels),\n }\n\n\ndef handDataset(\n testSplit: float = 0.15,\n shuffle: bool = True,\n handID: int = 0,\n local_import: bool = False,\n):\n \"\"\"Return the dataset of hand keypoints (see pose_classification_kit/datasets/HandPose_Dataset.csv)\n as numpy arrays.\n\n Args:\n testSplit (float, optional): Percent of the dataset reserved for testing. Defaults to 0.15. Must be between 0.0 and 1.0.\n shuffle (bool, optional): Shuffle the whole dataset. Defaults to True.\n handID (int, optional): Select hand side - 0:left, 1:right. Default to 0.\n local_import (bool, optional): Choose to use local dataset or fetch online dataset (global repository). Default False.\n\n Returns:\n dict: {\n 'x_train': training keypoints,\n 'y_train': training labels,\n 'y_train_onehot': training labels one-hot encoded,\n 'x_test': testing keypoints,\n 'y_test': testing labels,\n 'y_test_onehot': testing labels one-hot encoded,\n 'labels': list of labels\n }\n \"\"\"\n assert 0.0 <= testSplit <= 1.0\n\n datasetPath = DATASETS_PATH / \"HandPose_Dataset.csv\"\n datasetURL = \"https://raw.githubusercontent.com/ArthurFDLR/pose-classification-kit/master/pose_classification_kit/datasets/HandPose_Dataset.csv\"\n\n if local_import:\n dataset_df = pd.read_csv(datasetPath)\n else:\n dataset_df = pd.read_csv(datasetURL)\n\n hand_label = \"right\" if handID else \"left\"\n handLabels_df = {\n hand_i: dataset_df.loc[dataset_df[\"hand\"] == hand_i].groupby(\"label\")\n for hand_i in [\"left\", \"right\"]\n }\n labels = list(dataset_df.label.unique())\n\n # Find the minimum number of samples accross categories to uniformly distributed sample sets\n total_size_cat = handLabels_df[hand_label].size().min()\n test_size_cat = int(total_size_cat * testSplit)\n train_size_cat = total_size_cat - test_size_cat\n\n x_train = []\n x_test = []\n y_train = []\n y_test = []\n\n # Iterate over each labeled group\n for label, group in handLabels_df[hand_label]:\n # remove irrelevant columns\n group_array = group.drop([\"label\", \"hand\", \"accuracy\"], axis=1).to_numpy()\n np.random.shuffle(group_array)\n\n x_train.append(group_array[:train_size_cat])\n y_train.append([label] * train_size_cat)\n x_test.append(group_array[train_size_cat : train_size_cat + test_size_cat])\n y_test.append([label] * test_size_cat)\n\n # Concatenate sample sets as numpy arrays\n x_train = np.concatenate(x_train, axis=0)\n x_test = np.concatenate(x_test, axis=0)\n y_train = np.concatenate(y_train, axis=0)\n y_test = np.concatenate(y_test, axis=0)\n\n # Shuffle in unison\n if shuffle:\n shuffler_test = np.random.permutation(test_size_cat * len(labels))\n shuffler_train = np.random.permutation(train_size_cat * len(labels))\n x_train = x_train[shuffler_train]\n x_test = x_test[shuffler_test]\n y_train = y_train[shuffler_train]\n y_test = y_test[shuffler_test]\n\n # One-hot encoding\n y_train_onehot = get_one_hot(\n np.array([labels.index(sample) for sample in y_train]), len(labels)\n )\n y_test_onehot = get_one_hot(\n np.array([labels.index(sample) for sample in y_test]), len(labels)\n )\n\n return {\n \"x_train\": x_train,\n \"y_train\": y_train,\n \"y_train_onehot\": y_train_onehot,\n \"x_test\": x_test,\n \"y_test\": y_test,\n \"y_test_onehot\": y_test_onehot,\n \"labels\": np.array(labels),\n }\n"
},
{
"alpha_fraction": 0.5881832242012024,
"alphanum_fraction": 0.6031201481819153,
"avg_line_length": 35.44355010986328,
"blob_id": "50e45cc50935a924cbd01767047f6a1abc9fba11",
"content_id": "ebf219b97feb121f7374225ee3b2c963ab025cd0",
"detected_licenses": [
"MIT"
],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 9038,
"license_type": "permissive",
"max_line_length": 141,
"num_lines": 248,
"path": "/pose_classification_kit/src/keypoints_analysis/hand_analysis.py",
"repo_name": "sokoloveav/pose-classification-kit",
"src_encoding": "UTF-8",
"text": "from ..imports.qt import QtWidgets, QtCore\nfrom ..imports.tensorflow import tf, TF_LOADED\nfrom .dynamic_bar_graph_widget import BarGraphWidget\nfrom .classifier_selection_widget import ClassifierSelectionWidget\n\nimport numpy as np\nfrom matplotlib.backends.backend_qt5agg import FigureCanvas\nfrom matplotlib import figure, lines\n\n\nclass HandPlotWidget(QtWidgets.QWidget):\n def __init__(self):\n super().__init__()\n layout = QtWidgets.QVBoxLayout(self)\n self.canvas = FigureCanvas(figure.Figure(figsize=(5, 3)))\n layout.addWidget(self.canvas)\n self.setMinimumHeight(50)\n\n self.ax = self.canvas.figure.subplots()\n self.ax.set_xlim([-1.0, 1.0])\n self.ax.set_ylim([-1.0, 1.0])\n self.ax.set_aspect(\"equal\")\n\n self.fingerLines = [\n lines.Line2D([], [], color=\"r\"),\n lines.Line2D([], [], color=\"y\"),\n lines.Line2D([], [], color=\"g\"),\n lines.Line2D([], [], color=\"b\"),\n lines.Line2D([], [], color=\"m\"),\n ]\n\n for line in self.fingerLines:\n self.ax.add_line(line)\n\n def plotHand(self, handKeypoints, accuracy: int):\n if self.isHandData(handKeypoints):\n colors = [\"r\", \"y\", \"g\", \"b\", \"m\"]\n data = [\n handKeypoints[:, 0:5],\n np.insert(handKeypoints[:, 5:9].T, 0, handKeypoints[:, 0], axis=0).T,\n np.insert(handKeypoints[:, 9:13].T, 0, handKeypoints[:, 0], axis=0).T,\n np.insert(handKeypoints[:, 13:17].T, 0, handKeypoints[:, 0], axis=0).T,\n np.insert(handKeypoints[:, 17:21].T, 0, handKeypoints[:, 0], axis=0).T,\n ]\n for i, line in enumerate(self.fingerLines):\n line.set_data(data[i][0], data[i][1])\n self.ax.set_title(\n \"Accuracy: \" + str(accuracy), fontsize=12, color=\"#454545\"\n )\n else:\n self.clear()\n self.ax.set_title(\"\")\n self.canvas.draw()\n\n def clear(self):\n for line in self.fingerLines:\n line.set_data([], [])\n self.canvas.draw()\n\n def isHandData(self, keypoints):\n b = False\n if type(keypoints) == np.ndarray:\n if keypoints.shape == (3, 21):\n b = True\n return b\n\n\nclass HandAnalysisWidget(QtWidgets.QGroupBox):\n stylesheet = \"\"\"\n #Large_Label {\n font-size: 26px;\n color: #9500ff;\n font-family: -apple-system,BlinkMacSystemFont,Segoe UI,Roboto,Oxygen,Ubuntu,Cantarell,Fira Sans,Droid Sans,Helvetica Neue,sans-serif;\n }\n\n QSplitter::handle {\n color: #cbcbcb;\n border: 1px solid #cbcbcb;\n border-radius: 2px;\n }\n \"\"\"\n\n def __init__(self, handID: int):\n super().__init__((\"Right\" if handID == 1 else \"Left\") + \" hand\")\n self.setStyleSheet(self.stylesheet)\n self.handID = handID\n self.showInput = True\n self.classOutputs = []\n self.modelClassifier = None\n self.currentPrediction = \"\"\n\n self.layout = QtWidgets.QGridLayout(self)\n self.setLayout(self.layout)\n\n self.predictionLabel = QtWidgets.QLabel(self)\n self.predictionLabel.setObjectName(\"Large_Label\")\n self.layout.addWidget(self.predictionLabel)\n self.predictionLabel.setAlignment(QtCore.Qt.AlignCenter)\n\n self.classGraphWidget = BarGraphWidget()\n\n self.handGraphWidget = HandPlotWidget()\n self.graphSplitter = QtWidgets.QSplitter(QtCore.Qt.Vertical)\n self.graphSplitter.setChildrenCollapsible(False)\n self.graphSplitter.addWidget(self.handGraphWidget)\n self.graphSplitter.addWidget(self.classGraphWidget)\n self.graphSplitter.setStretchFactor(0, 2)\n self.graphSplitter.setStretchFactor(1, 1)\n\n self.layout.addWidget(self.graphSplitter)\n\n def setClassifierModel(self, model, classOutputs): # model:tf.keras.models\n self.modelClassifier = model\n self.classOutputs = classOutputs\n self.classGraphWidget.changeCategories(self.classOutputs)\n\n def drawHand(self, handKeypoints: np.ndarray, accuracy: float):\n \"\"\"Draw keypoints of a hand pose in the widget if showInput==True.\n\n Args:\n keypoints (np.ndarray((3,21),float)): Coordinates x, y and the accuracy score for each 21 key points.\n accuracy (float): Global accuracy of detection of the pose.\n \"\"\"\n if self.showInput:\n # self.handGraphWidget.setTitle('Detection accuracy: ' + str(accuracy))\n self.updatePredictedClass(handKeypoints)\n self.handGraphWidget.plotHand(handKeypoints, accuracy)\n\n def updatePredictedClass(self, keypoints: np.ndarray):\n \"\"\"Draw keypoints of a hand pose in the widget.\n\n Args:\n keypoints (np.ndarray((3,21),float)): Coordinates x, y and the accuracy score for each 21 key points.\n \"\"\"\n\n prediction = [0 for i in self.classOutputs]\n title = \"\"\n if type(keypoints) != type(None):\n inputData = []\n for i in range(keypoints.shape[1]):\n inputData.append(keypoints[0, i]) # add x\n inputData.append(keypoints[1, i]) # add y\n inputData = np.array(inputData)\n\n if self.modelClassifier is not None:\n prediction = self.modelClassifier.predict(np.array([inputData]))[0]\n self.currentPrediction = self.classOutputs[np.argmax(prediction)]\n title = self.currentPrediction\n\n self.classGraphWidget.updateValues(np.array(prediction))\n self.setPredictionText(title)\n\n def newModelLoaded(self, urlModel: str, modelInfo: list, handID: int):\n if TF_LOADED:\n if urlModel == \"None\":\n self.setClassifierModel(None, [])\n else:\n if handID == self.handID:\n model = tf.keras.models.load_model(urlModel)\n nbrClass = model.layers[-1].output_shape[1]\n if (\n modelInfo\n and modelInfo.get(\"labels\")\n and len(modelInfo.get(\"labels\")) == nbrClass\n ):\n classOutputs = modelInfo.get(\"labels\")\n else:\n classOutputs = [str(i) for i in range(1, nbrClass + 1)]\n self.setClassifierModel(model, classOutputs)\n\n def getCurrentPrediction(self) -> str:\n return self.currentPrediction\n\n def setPredictionText(self, prediction: str):\n self.predictionLabel.setText(prediction)\n\n\nclass HandClassifierWidget(QtWidgets.QWidget):\n stylesheet = \"\"\"\n #Hand_classifier {\n background-color: white;\n border-radius: 3px;\n font-family: -apple-system,BlinkMacSystemFont,Segoe UI,Roboto,Oxygen,Ubuntu,Cantarell,Fira Sans,Droid Sans,Helvetica Neue,sans-serif;\n }\n QGroupBox {\n font-size: 16px;\n font-family: -apple-system,BlinkMacSystemFont,Segoe UI,Roboto,Oxygen,Ubuntu,Cantarell,Fira Sans,Droid Sans,Helvetica Neue,sans-serif;\n }\n QLabel {\n font-size: 16px;\n font-family: -apple-system,BlinkMacSystemFont,Segoe UI,Roboto,Oxygen,Ubuntu,Cantarell,Fira Sans,Droid Sans,Helvetica Neue,sans-serif;\n }\n QPushButton {\n border: 1px solid #cbcbcb;\n border-radius: 2px;\n font-size: 16px;\n background: white;\n padding: 3px;\n }\n QComboBox {\n border: 1px solid #cbcbcb;\n border-radius: 3px;\n font-size: 16px;\n background: white;\n }\n QPushButton:hover {\n border-color: rgb(139, 173, 228);\n }\n QPushButton:pressed {\n background: #cbcbcb;\n }\n \"\"\"\n\n def __init__(self):\n super().__init__()\n ## Widget style\n self.setObjectName(\"Hand_classifier\")\n self.setAttribute(QtCore.Qt.WA_StyledBackground, True)\n self.setStyleSheet(self.stylesheet)\n\n effect = QtWidgets.QGraphicsDropShadowEffect(self)\n effect.setBlurRadius(10)\n effect.setOffset(0, 0)\n effect.setColor(QtCore.Qt.gray)\n self.setGraphicsEffect(effect)\n\n ## Structure\n self.layout = QtWidgets.QGridLayout(self)\n self.setLayout(self.layout)\n self.layout.setRowStretch(0, 1)\n self.layout.setRowStretch(1, 0)\n\n self.classifierWidget = ClassifierSelectionWidget(\n parent=self, bodyClassification=False\n )\n self.layout.addWidget(self.classifierWidget, 2, 0, 1, 2)\n\n self.leftHandAnalysis = HandAnalysisWidget(0)\n self.classifierWidget.newClassifierModel_Signal.connect(\n self.leftHandAnalysis.newModelLoaded\n )\n self.layout.addWidget(self.leftHandAnalysis, 0, 0, 2, 1)\n\n self.rightHandAnalysis = HandAnalysisWidget(1)\n self.classifierWidget.newClassifierModel_Signal.connect(\n self.rightHandAnalysis.newModelLoaded\n )\n self.layout.addWidget(self.rightHandAnalysis, 0, 1, 2, 1)\n"
},
{
"alpha_fraction": 0.482587069272995,
"alphanum_fraction": 0.5046336650848389,
"avg_line_length": 38.27586364746094,
"blob_id": "ca7f316c7fc6e8a56b5dceeadaabfd5bde936c7d",
"content_id": "c0b648967e4918b7e03dc9d92e70dd0992c9b3a2",
"detected_licenses": [
"MIT"
],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 10251,
"license_type": "permissive",
"max_line_length": 101,
"num_lines": 261,
"path": "/pose_classification_kit/src/video_analysis/openpose_thread.py",
"repo_name": "sokoloveav/pose-classification-kit",
"src_encoding": "UTF-8",
"text": "from ..imports.qt import QtCore, pyqtSignal, pyqtSlot\nfrom ..imports.openpose import OPENPOSE_LOADED, OPENPOSE_MODELS_PATH\n\nif OPENPOSE_LOADED:\n from ..imports.openpose import op\nimport cv2\nimport numpy as np\n\n\ndef getLengthLimb(data, keypoint1: int, keypoint2: int):\n if data[keypoint1, 2] > 0.0 and data[keypoint2, 2] > 0:\n return np.linalg.norm([data[keypoint1, 0:2] - data[keypoint2, 0:2]])\n return 0\n\n\nclass VideoAnalysisThread(QtCore.QThread):\n newFrame = pyqtSignal(np.ndarray)\n\n def __init__(self, videoSource):\n super().__init__()\n self.infoText = \"\"\n self.personID = 0\n self.running = False\n self.last_frame = np.array([])\n self.videoSource = videoSource\n\n ## Starting OpenPose ##\n #######################\n if OPENPOSE_LOADED:\n params = dict()\n params[\"model_folder\"] = str(OPENPOSE_MODELS_PATH)\n params[\"face\"] = False\n params[\"hand\"] = True\n params[\"disable_multi_thread\"] = False\n netRes = 15 # Default 22\n params[\"net_resolution\"] = \"-1x\" + str(16 * netRes)\n\n self.opWrapper = op.WrapperPython()\n self.datum = op.Datum()\n self.opWrapper.configure(params)\n self.opWrapper.start()\n\n def run(self):\n while OPENPOSE_LOADED:\n if self.running:\n frame = self.videoSource.getLastFrame()\n if (type(frame) != type(None)) and not np.array_equal(\n self.last_frame, frame\n ):\n self.last_frame = frame\n # Check if frame exist, frame!=None is ambigious when frame is an array\n frame = self.resizeCvFrame(frame, 0.5)\n self.datum.cvInputData = frame\n self.opWrapper.emplaceAndPop([self.datum])\n frameOutput = self.datum.cvOutputData\n self.newFrame.emit(frameOutput)\n\n @pyqtSlot(bool)\n def setState(self, s: bool):\n self.running = s\n\n def getHandData(self, handID: int):\n \"\"\"Return the key points of the hand seen in the image (cf. videoSource).\n\n Args:\n handID (int): 0 -> Left hand | 1 -> Right hand\n\n Returns:\n np.ndarray((3,21),float): Coordinates x, y and the accuracy score for each 21 key points.\n None if the given hand is not detected.\n \"\"\"\n outputArray = None\n\n handKeypoints = np.array(self.datum.handKeypoints)\n nbrPersonDetected = handKeypoints.shape[1] if handKeypoints.ndim > 2 else 0\n handAccuaracyScore = 0.0\n if nbrPersonDetected > 0:\n handAccuaracyScore = handKeypoints[handID, self.personID].T[2].sum()\n handDetected = handAccuaracyScore > 1.0\n if handDetected:\n handKeypoints = handKeypoints[handID, self.personID]\n # Initialize with the length of the first segment of each fingers\n lengthFingers = [\n np.sqrt(\n (handKeypoints[0, 0] - handKeypoints[i, 0]) ** 2\n + (handKeypoints[0, 1] - handKeypoints[i, 1]) ** 2\n )\n for i in [1, 5, 9, 13, 17]\n ]\n for i in range(3): # Add length of other segments of each fingers\n for j in range(len(lengthFingers)):\n x = (\n handKeypoints[1 + j * 4 + i + 1, 0]\n - handKeypoints[1 + j * 4 + i, 0]\n )\n y = (\n handKeypoints[1 + j * 4 + i + 1, 1]\n - handKeypoints[1 + j * 4 + i, 1]\n )\n lengthFingers[j] += np.sqrt(x ** 2 + y ** 2)\n normMax = max(lengthFingers)\n\n handCenterX = handKeypoints.T[0].sum() / handKeypoints.shape[0]\n handCenterY = handKeypoints.T[1].sum() / handKeypoints.shape[0]\n outputArray = np.array(\n [\n (handKeypoints.T[0] - handCenterX) / normMax,\n -(handKeypoints.T[1] - handCenterY) / normMax,\n (handKeypoints.T[2]),\n ]\n )\n return outputArray, handAccuaracyScore\n\n def getBodyData(self):\n\n outputArray = None\n accuaracyScore = 0.0\n if len(self.datum.poseKeypoints.shape) > 0:\n\n # Read body data\n outputArray = self.datum.poseKeypoints[self.personID]\n accuaracyScore = outputArray[:, 2].sum()\n\n # Find bouding box\n min_x, max_x = float(\"inf\"), 0.0\n min_y, max_y = float(\"inf\"), 0.0\n for keypoint in outputArray:\n if keypoint[2] > 0.0: # If keypoint exists in image\n min_x = min(min_x, keypoint[0])\n max_x = max(max_x, keypoint[0])\n min_y = min(min_y, keypoint[1])\n max_y = max(max_y, keypoint[1])\n\n # Centering\n np.subtract(\n outputArray[:, 0],\n (min_x + max_x) / 2,\n where=outputArray[:, 2] > 0.0,\n out=outputArray[:, 0],\n )\n np.subtract(\n (min_y + max_y) / 2,\n outputArray[:, 1],\n where=outputArray[:, 2] > 0.0,\n out=outputArray[:, 1],\n )\n\n # Scaling\n normalizedPartsLength = np.array(\n [\n getLengthLimb(outputArray, 1, 8) * (16.0 / 5.2), # Torso\n getLengthLimb(outputArray, 0, 1) * (16.0 / 2.5), # Neck\n getLengthLimb(outputArray, 9, 10) * (16.0 / 3.6), # Right thigh\n getLengthLimb(outputArray, 10, 11)\n * (16.0 / 3.5), # Right lower leg\n getLengthLimb(outputArray, 12, 13) * (16.0 / 3.6), # Left thigh\n getLengthLimb(outputArray, 13, 14) * (16.0 / 3.5), # Left lower leg\n getLengthLimb(outputArray, 2, 5) * (16.0 / 3.4), # Shoulders\n ]\n )\n\n # Mean of non-zero values\n normalizedPartsLength = normalizedPartsLength[normalizedPartsLength > 0.0]\n if len(normalizedPartsLength) > 0:\n scaleFactor = np.mean(normalizedPartsLength)\n else:\n # print(\"Scaling error\")\n return None, 0.0\n\n np.divide(outputArray[:, 0:2], scaleFactor, out=outputArray[:, 0:2])\n\n if np.any((outputArray > 1.0) | (outputArray < -1.0)):\n # print(\"Scaling error\")\n return None, 0.0\n\n outputArray = outputArray.T\n\n return outputArray, accuaracyScore\n\n def getInfoText(self) -> str:\n handKeypoints = np.array(self.datum.handKeypoints)\n nbrPersonDetected = handKeypoints.shape[1] if handKeypoints.ndim > 2 else 0\n\n self.infoText = \"\"\n self.infoText += str(nbrPersonDetected) + (\n \" person detected\" if nbrPersonDetected < 2 else \" person detected\"\n )\n\n if nbrPersonDetected > 0:\n leftHandDetected = handKeypoints[0, self.personID].T[2].sum() > 1.0\n rightHandDetected = handKeypoints[1, self.personID].T[2].sum() > 1.0\n if rightHandDetected and leftHandDetected:\n self.infoText += (\n \", both hands of person \" + str(self.personID + 1) + \" detected.\"\n )\n elif rightHandDetected or leftHandDetected:\n self.infoText += (\n \", \"\n + (\"Right\" if rightHandDetected else \"Left\")\n + \" hand of person \"\n + str(self.personID + 1)\n + \" detected.\"\n )\n else:\n self.infoText += (\n \", no hand of person \" + str(self.personID + 1) + \" detected.\"\n )\n\n return self.infoText\n\n def getFingerLength(self, fingerData):\n length = 0.0\n for i in range(fingerData.shape[0] - 1):\n x = fingerData[i + 1, 0] - fingerData[i, 0]\n y = fingerData[i + 1, 1] - fingerData[i, 1]\n length += np.sqrt(x ** 2 + y ** 2)\n return length\n\n def resizeCvFrame(self, frame, ratio: float):\n width = int(frame.shape[1] * ratio)\n height = int(frame.shape[0] * ratio)\n dim = (width, height)\n # resize image in down scale\n resized = cv2.resize(frame, dim, interpolation=cv2.INTER_AREA)\n return resized\n\n def isRaisingHand(self):\n poseKeypoints = self.getBodyData()\n raisingRight = False\n raisingLeft = False\n if type(poseKeypoints) != type(None):\n rightHand_x, rightHand_y, rightHand_a = poseKeypoints[4]\n leftHand_x, leftHand_y, leftHand_a = poseKeypoints[7]\n rightShoulder_x, rightShoulder_y, rightShoulder_a = poseKeypoints[2]\n leftShoulder_x, leftShoulder_y, leftShoulder_a = poseKeypoints[5]\n\n try:\n shoulderSlope = (rightShoulder_y - leftShoulder_y) / (\n rightShoulder_x - leftShoulder_x\n )\n except:\n shoulderSlope = 0.0\n shoulderOri = rightShoulder_y - shoulderSlope * rightShoulder_x\n\n if leftHand_a > 0.1:\n raisingLeft = leftHand_y < (\n shoulderSlope * leftHand_x + shoulderOri\n ) # y axis oriented from top to down in images\n raisingLeft = (\n raisingLeft and leftHand_y < poseKeypoints[6, 1]\n ) # Check if hand above elbow\n else:\n raisingLeft = False\n\n if rightHand_a > 0.1:\n raisingRight = rightHand_y < (shoulderSlope * rightHand_x + shoulderOri)\n raisingRight = raisingRight and rightHand_y < poseKeypoints[3, 1]\n else:\n raisingRight = False\n\n return raisingLeft, raisingRight\n"
},
{
"alpha_fraction": 0.6181507110595703,
"alphanum_fraction": 0.6215753555297852,
"avg_line_length": 29.736841201782227,
"blob_id": "62c5279370d0f54f05260a563edb0ef3014c50bc",
"content_id": "9e0c6357eee5ba2ab1dee76b576c177da1324c5a",
"detected_licenses": [
"MIT"
],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 584,
"license_type": "permissive",
"max_line_length": 85,
"num_lines": 19,
"path": "/pose_classification_kit/src/imports/openpose.py",
"repo_name": "sokoloveav/pose-classification-kit",
"src_encoding": "UTF-8",
"text": "import sys\nimport os\n\nfrom ...config import OPENPOSE_PATH\n\ntry:\n sys.path.append(str(OPENPOSE_PATH / \"build\" / \"python\" / \"openpose\" / \"Release\"))\n releasePATH = OPENPOSE_PATH / \"build\" / \"x64\" / \"Release\"\n binPATH = OPENPOSE_PATH / \"build\" / \"bin\"\n OPENPOSE_MODELS_PATH = OPENPOSE_PATH / \"models\"\n os.environ[\"PATH\"] = (\n os.environ[\"PATH\"] + \";\" + str(releasePATH) + \";\" + str(binPATH) + \";\"\n )\n import pyopenpose as op\n\n OPENPOSE_LOADED = True\nexcept:\n OPENPOSE_LOADED = False\n print(\"OpenPose ({}) loading failed.\".format(str(OPENPOSE_PATH)))\n"
},
{
"alpha_fraction": 0.2629942297935486,
"alphanum_fraction": 0.3608766496181488,
"avg_line_length": 21.73737335205078,
"blob_id": "176c1d742774ac26687380d43d767760d2ac4eb1",
"content_id": "fdb81c3b2d218d413efec1cd294cfdf9c92d9c65",
"detected_licenses": [
"MIT"
],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 6753,
"license_type": "permissive",
"max_line_length": 169,
"num_lines": 297,
"path": "/pose_classification_kit/datasets/body_models.py",
"repo_name": "sokoloveav/pose-classification-kit",
"src_encoding": "UTF-8",
"text": "import numpy as np\n\n\nclass BodyModel:\n def __init__(self, mapping, pairs) -> None:\n self.mapping = mapping\n self.pairs = pairs\n\n\nBODY18 = BodyModel(\n mapping=[\n \"nose\",\n \"left_eye\",\n \"right_eye\",\n \"left_ear\",\n \"right_ear\",\n \"left_shoulder\",\n \"right_shoulder\",\n \"left_elbow\",\n \"right_elbow\",\n \"left_wrist\",\n \"right_wrist\",\n \"left_hip\",\n \"right_hip\",\n \"left_knee\",\n \"right_knee\",\n \"left_ankle\",\n \"right_ankle\",\n \"neck\",\n ],\n pairs=[\n [15, 13],\n [13, 11],\n [16, 14],\n [14, 12],\n [11, 12],\n [5, 7],\n [6, 8],\n [7, 9],\n [8, 10],\n [1, 2],\n [0, 1],\n [0, 2],\n [1, 3],\n [2, 4],\n [3, 5],\n [4, 6],\n [17, 0],\n [17, 5],\n [17, 6],\n [17, 11],\n [17, 12],\n ],\n)\n\nBODY18_FLAT = BodyModel(\n mapping=[\n \"nose_x\",\n \"nose_y\",\n \"left_eye_x\",\n \"left_eye_y\",\n \"right_eye_x\",\n \"right_eye_y\",\n \"left_ear_x\",\n \"left_ear_y\",\n \"right_ear_x\",\n \"right_ear_y\",\n \"left_shoulder_x\",\n \"left_shoulder_y\",\n \"right_shoulder_x\",\n \"right_shoulder_y\",\n \"left_elbow_x\",\n \"left_elbow_y\",\n \"right_elbow_x\",\n \"right_elbow_y\",\n \"left_wrist_x\",\n \"left_wrist_y\",\n \"right_wrist_x\",\n \"right_wrist_y\",\n \"left_hip_x\",\n \"left_hip_y\",\n \"right_hip_x\",\n \"right_hip_y\",\n \"left_knee_x\",\n \"left_knee_y\",\n \"right_knee_x\",\n \"right_knee_y\",\n \"left_ankle_x\",\n \"left_ankle_y\",\n \"right_ankle_x\",\n \"right_ankle_y\",\n \"neck_x\",\n \"neck_y\",\n ],\n pairs=[\n [(30, 31), (26, 27)],\n [(26, 27), (22, 23)],\n [(32, 33), (28, 29)],\n [(28, 29), (24, 25)],\n [(22, 23), (24, 25)],\n [(10, 11), (14, 15)],\n [(12, 13), (16, 17)],\n [(14, 15), (18, 19)],\n [(16, 17), (20, 21)],\n [(2, 3), (4, 5)],\n [(0, 1), (2, 3)],\n [(0, 1), (4, 5)],\n [(2, 3), (6, 7)],\n [(4, 5), (8, 9)],\n [(6, 7), (10, 11)],\n [(8, 9), (12, 13)],\n [(34, 35), (0, 1)],\n [(34, 35), (10, 11)],\n [(34, 35), (12, 13)],\n [(34, 35), (22, 23)],\n [(34, 35), (24, 25)],\n ],\n)\n\nBODY25 = BodyModel(\n mapping=[\n \"nose\",\n \"neck\",\n \"right_shoulder\",\n \"right_elbow\",\n \"right_wrist\",\n \"left_shoulder\",\n \"left_elbow\",\n \"left_wrist\",\n \"mid_hip\",\n \"right_hip\",\n \"right_knee\",\n \"right_ankle\",\n \"left_hip\",\n \"left_knee\",\n \"left_ankle\",\n \"right_eye\",\n \"left_eye\",\n \"right_ear\",\n \"left_ear\",\n \"left_bigtoe\",\n \"left_smalltoe\",\n \"left_heel\",\n \"right_bigtoe\",\n \"right_smalltoe\",\n \"right_heel\",\n ],\n pairs=[\n [1, 8],\n [1, 2],\n [1, 5],\n [2, 3],\n [3, 4],\n [5, 6],\n [6, 7],\n [8, 9],\n [9, 10],\n [10, 11],\n [8, 12],\n [12, 13],\n [13, 14],\n [1, 0],\n [0, 15],\n [15, 17],\n [0, 16],\n [16, 18],\n [2, 17],\n [5, 18],\n [14, 19],\n [19, 20],\n [14, 21],\n [11, 22],\n [22, 23],\n [11, 24],\n ],\n)\n\"\"\" #BODY25 annotated\n pairs_annotated={\n \"Torso\":[1, 8], \n \"Shoulder (right)\":[1, 2],\n \"Shoulder (left)\":[1, 5],\n \"Arm (right)\":[2, 3],\n \"Forearm (right)\":[3, 4],\n \"Arm (left)\":[5, 6],\n \"Forearm (left)\":[6, 7],\n \"Hip (right)\":[8, 9],\n \"Thigh (right)\":[9, 10],\n \"Leg (right)\":[10, 11],\n \"Hip (left)\":[8, 12],\n \"Thigh (left)\":[12, 13],\n \"Leg (left)\":[13, 14],\n \"Neck\":[1, 0],\n \"Eye (right)\":[0, 15],\n \"Ear (right)\":[15, 17],\n \"Eye (left)\":[0, 16],\n \"Ear (left)\":[16, 18],\n \"Foot (left)\":[14, 19],\n \"Toe (left)\":[19, 20],\n \"Heel (left)\":[14, 21],\n \"Foot (right)\":[11, 22],\n \"Toe (right)\":[22, 23],\n \"Heel (right)\":[11, 24],\n }\n\"\"\"\n\nBODY25_FLAT = BodyModel(\n mapping=[\n \"nose_x\",\n \"nose_y\",\n \"neck_x\",\n \"neck_y\",\n \"right_shoulder_x\",\n \"right_shoulder_y\",\n \"right_elbow_x\",\n \"right_elbow_y\",\n \"right_wrist_x\",\n \"right_wrist_y\",\n \"left_shoulder_x\",\n \"left_shoulder_y\",\n \"left_elbow_x\",\n \"left_elbow_y\",\n \"left_wrist_x\",\n \"left_wrist_y\",\n \"mid_hip_x\",\n \"mid_hip_y\",\n \"right_hip_x\",\n \"right_hip_y\",\n \"right_knee_x\",\n \"right_knee_y\",\n \"right_ankle_x\",\n \"right_ankle_y\",\n \"left_hip_x\",\n \"left_hip_y\",\n \"left_knee_x\",\n \"left_knee_y\",\n \"left_ankle_x\",\n \"left_ankle_y\",\n \"right_eye_x\",\n \"right_eye_y\",\n \"left_eye_x\",\n \"left_eye_y\",\n \"right_ear_x\",\n \"right_ear_y\",\n \"left_ear_x\",\n \"left_ear_y\",\n \"left_bigtoe_x\",\n \"left_bigtoe_y\",\n \"left_smalltoe_x\",\n \"left_smalltoe_y\",\n \"left_heel_x\",\n \"left_heel_y\",\n \"right_bigtoe_x\",\n \"right_bigtoe_y\",\n \"right_smalltoe_x\",\n \"right_smalltoe_y\",\n \"right_heel_x\",\n \"right_heel_y\",\n ],\n pairs=[\n [(2, 3), (16, 17)],\n [(2, 3), (4, 5)],\n [(2, 3), (10, 11)],\n [(4, 5), (6, 7)],\n [(6, 7), (8, 9)],\n [(10, 11), (12, 13)],\n [(12, 13), (14, 15)],\n [(16, 17), (18, 19)],\n [(18, 19), (20, 21)],\n [(20, 21), (22, 23)],\n [(16, 17), (24, 25)],\n [(24, 25), (26, 27)],\n [(26, 27), (28, 29)],\n [(2, 3), (0, 1)],\n [(0, 1), (30, 31)],\n [(30, 31), (34, 35)],\n [(0, 1), (32, 33)],\n [(32, 33), (36, 37)],\n [(4, 5), (34, 35)],\n [(10, 11), (36, 37)],\n [(28, 29), (38, 39)],\n [(38, 39), (40, 41)],\n [(28, 29), (42, 43)],\n [(22, 23), (44, 45)],\n [(44, 45), (46, 47)],\n [(22, 23), (48, 49)],\n ],\n)\n\n# fmt: off\nBODY25_to_BODY18_indices = [0, 16, 15, 18, 17, 5, 2, 6, 3, 7, 4, 12, 9, 13, 10, 14, 11, 1]\nBODY25flat_to_BODY18flat_indices = [0, 1, 32, 33, 30, 31, 36, 37, 34, 35, 10, 11, 4, 5, 12, 13, 6, 7, 14, 15, 8, 9, 24, 25, 18, 19, 26, 27, 20, 21, 28, 29, 22, 23, 2, 3]\n# fmt: on\n\n\ndef BODY25_to_BODY18(body25_keypoints: np.ndarray):\n assert body25_keypoints.shape == 25\n return body25_keypoints[BODY25_to_BODY18_indices]\n"
},
{
"alpha_fraction": 0.6034685373306274,
"alphanum_fraction": 0.6148129105567932,
"avg_line_length": 32.34347915649414,
"blob_id": "dcc7ca1287de7b6689f7acafb385f983dfd66bbc",
"content_id": "4389fcaa72730c7f8e0b4440c70d2373a6128c19",
"detected_licenses": [
"MIT"
],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 7669,
"license_type": "permissive",
"max_line_length": 141,
"num_lines": 230,
"path": "/pose_classification_kit/src/video_analysis/video_manager.py",
"repo_name": "sokoloveav/pose-classification-kit",
"src_encoding": "UTF-8",
"text": "from ..imports.qt import QtWidgets, QtCore, QtGui, QtMultimedia, pyqtSignal, pyqtSlot\nfrom .openpose_thread import OPENPOSE_LOADED\nfrom ...config import OPENPOSE_PATH\nimport pathlib\nimport cv2\nimport numpy as np\nimport qimage2ndarray\n\n\nclass CameraInput:\n def __init__(self):\n self.available_cameras = QtMultimedia.QCameraInfo.availableCameras()\n if not self.available_cameras:\n print(\"No camera\")\n pass # quit\n\n self.buffer = QtCore.QBuffer\n # self.lastImage = QtGui.QImage('.\\\\Data\\\\tempInit.png')\n self.lastImage = QtGui.QPixmap(10, 10).toImage()\n self.lastID = None\n self.save_path = \"\"\n self.tmpUrl = str(\n pathlib.Path(__file__).parent.absolute() / \"tmp.png\"\n ) # / 'Data'\n\n self.capture = None\n\n self.select_camera(0)\n\n def refreshCameraList(self):\n self.available_cameras = QtMultimedia.QCameraInfo.availableCameras()\n if not self.available_cameras:\n print(\"No camera\")\n return None\n self.camera.stop()\n self.select_camera(0)\n return self.available_cameras\n\n def getAvailableCam(self):\n return self.available_cameras\n\n def select_camera(self, i):\n if len(self.available_cameras) > 0:\n self.camera = QtMultimedia.QCamera(self.available_cameras[i])\n self.camera.setCaptureMode(QtMultimedia.QCamera.CaptureStillImage)\n self.camera.start()\n\n self.capture = QtMultimedia.QCameraImageCapture(self.camera)\n self.capture.setCaptureDestination(\n QtMultimedia.QCameraImageCapture.CaptureToBuffer\n )\n\n self.capture.imageCaptured.connect(self.storeLastFrame)\n\n self.current_camera_name = self.available_cameras[i].description()\n self.save_seq = 0\n else:\n print(\"No camera.\")\n\n def getLastFrame(self):\n if self.capture:\n imageID = self.capture.capture()\n frame = self.qImageToMat(self.lastImage.mirrored())\n return frame\n else:\n return None\n\n def storeLastFrame(self, idImg: int, preview: QtGui.QImage):\n self.lastImage = preview\n self.lastID = idImg\n\n def qImageToMat(self, incomingImage):\n incomingImage.save(self.tmpUrl, \"png\")\n mat = cv2.imread(self.tmpUrl)\n return mat\n\n def qImageToMat_alt(self, incomingImage):\n \"\"\" Converts a QImage into an opencv MAT format \"\"\"\n\n # Convert to 32-bit RGBA with solid opaque alpha\n # and get the pointer numpy will want.\n #\n # Cautions:\n # 1. I think I remember reading that PyQt5 only has\n # constBits() and PySide2 only has bits(), so you may\n # need to do something like `if hasattr(...)` for\n # portability.\n #\n # 2. Format_RGBX8888 is native-endian for your\n # platform and I suspect this code, as-is,\n # would break on a big-endian system.\n im_in = incomingImage.convertToFormat(QtGui.QImage.Format_RGBX8888)\n ptr = im_in.constBits()\n ptr.setsize(im_in.byteCount())\n\n # Convert the image into a numpy array in the\n # format PyOpenCV expects to operate on, explicitly\n # copying to avoid potential lifetime bugs when it\n # hasn't yet proven a performance issue for my uses.\n cv_im_in = np.array(ptr, copy=True).reshape(im_in.height(), im_in.width(), 4)\n cv_im_in = cv2.cvtColor(cv_im_in, cv2.COLOR_BGRA2RGB)\n\n return cv_im_in\n\n\nclass ImageWidget(QtWidgets.QLabel):\n def __init__(self, parent=None):\n super().__init__(parent)\n self.parent = parent\n self.setScaledContents(True)\n self.setMinimumWidth(100)\n\n def hasHeightForWidth(self):\n return self.pixmap() is not None\n\n def heightForWidth(self, w):\n if self.pixmap():\n return int(w * (self.pixmap().height() / self.pixmap().width()))\n\n\nclass VideoViewerWidget(QtWidgets.QWidget):\n changeCameraID_signal = pyqtSignal()\n stylesheet = \"\"\"\n #Video_viewer {\n background-color: white;\n border-radius: 3px;\n font-family: -apple-system,BlinkMacSystemFont,Segoe UI,Roboto,Oxygen,Ubuntu,Cantarell,Fira Sans,Droid Sans,Helvetica Neue,sans-serif;\n }\n QLabel {\n font-size: 16px;\n font-family: -apple-system,BlinkMacSystemFont,Segoe UI,Roboto,Oxygen,Ubuntu,Cantarell,Fira Sans,Droid Sans,Helvetica Neue,sans-serif;\n }\n QPushButton {\n border: 1px solid #cbcbcb;\n border-radius: 2px;\n font-size: 16px;\n background: white;\n padding: 3px;\n }\n #OpenPose_button {\n border: 1px solid #cbcbcb;\n border-radius: 2px;\n font-size: 16px;\n background: #ffcccc;\n padding: 3px;\n }\n QComboBox {\n border: 1px solid #cbcbcb;\n border-radius: 2px;\n font-size: 16px;\n background: white;\n }\n QPushButton:hover {\n border-color: rgb(139, 173, 228);\n }\n QPushButton:pressed {\n background: #cbcbcb;\n }\n #OpenPose_button:checked {\n background: #ccffcc;\n }\n \"\"\"\n\n def __init__(self, availableCameras):\n super().__init__()\n self.availableCameras = availableCameras\n\n ## Widget style\n self.setObjectName(\"Video_viewer\")\n self.setAttribute(QtCore.Qt.WA_StyledBackground, True)\n self.setStyleSheet(self.stylesheet)\n\n effect = QtWidgets.QGraphicsDropShadowEffect(self)\n effect.setBlurRadius(10)\n effect.setOffset(0, 0)\n effect.setColor(QtCore.Qt.gray)\n self.setGraphicsEffect(effect)\n\n ## Widgets initialisation\n self.cameraFeed = ImageWidget(self)\n\n self.infoLabel = QtWidgets.QLabel(\"No info\")\n\n self.refreshButton = QtWidgets.QPushButton(\n \"Refresh camera list\",\n cursor=QtCore.Qt.PointingHandCursor,\n toolTip=\"Update available camera list\",\n )\n self.refreshButton.resize(self.refreshButton.sizeHint())\n\n self.camera_selector = QtWidgets.QComboBox(cursor=QtCore.Qt.PointingHandCursor)\n self.camera_selector.addItems([c.description() for c in self.availableCameras])\n\n ## Widget structure\n self.layout = QtWidgets.QGridLayout(self)\n self.setLayout(self.layout)\n\n if OPENPOSE_LOADED:\n self.layout.addWidget(self.cameraFeed, 0, 0, 1, 3)\n self.layout.addWidget(self.refreshButton, 2, 0, 1, 1)\n self.layout.addWidget(self.camera_selector, 2, 1, 1, 1)\n self.layout.addWidget(self.infoLabel, 1, 0, 1, 3)\n else:\n label = QtWidgets.QLabel(\n \"Video analysis impossible:\\nCannot import OpenPose from \"\n + str(OPENPOSE_PATH)\n + \",\\nchange path in pose_classification_kit\\config.py if needed.\"\n )\n self.layout.addWidget(label, 0, 0, 1, 1)\n\n @pyqtSlot(np.ndarray)\n def setFrame(self, frame: np.ndarray):\n image = qimage2ndarray.array2qimage(cv2.cvtColor(frame, cv2.COLOR_BGR2RGB))\n self.currentPixmap = QtGui.QPixmap.fromImage(image)\n self.cameraFeed.setPixmap(\n self.currentPixmap.scaled(\n self.cameraFeed.size(),\n QtCore.Qt.KeepAspectRatio,\n QtCore.Qt.SmoothTransformation,\n )\n )\n\n # def setVideoSize(self, width: int, height: int):\n # self.cameraFeed.setFixedSize(width, height)\n\n def setInfoText(self, info: str):\n if info:\n self.infoLabel.setText(info)\n else:\n self.infoLabel.setText(\"\")\n"
},
{
"alpha_fraction": 0.5486710071563721,
"alphanum_fraction": 0.5652287602424622,
"avg_line_length": 34.63664627075195,
"blob_id": "e68a3fd9291ddd025dde13812579a686e21c24d1",
"content_id": "a4d5ce135511877547477288a53130049e4616be",
"detected_licenses": [
"MIT"
],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 11475,
"license_type": "permissive",
"max_line_length": 141,
"num_lines": 322,
"path": "/pose_classification_kit/src/keypoints_analysis/body_analysis.py",
"repo_name": "sokoloveav/pose-classification-kit",
"src_encoding": "UTF-8",
"text": "from ..imports.qt import QtWidgets, QtCore\nfrom ..imports.tensorflow import tf, TF_LOADED\nfrom ..imports.openpose import OPENPOSE_LOADED\n\nif OPENPOSE_LOADED:\n from ..imports.openpose import op\nfrom .dynamic_bar_graph_widget import BarGraphWidget\nfrom .classifier_selection_widget import ClassifierSelectionWidget\nfrom ...datasets.body_models import (\n BODY18,\n BODY18_FLAT,\n BODY25,\n BODY25_FLAT,\n BODY25_to_BODY18_indices,\n)\n\nimport numpy as np\nfrom matplotlib.backends.backend_qt5agg import FigureCanvas\nfrom matplotlib import figure, lines\nfrom matplotlib.pyplot import cm\n\n\nclass BodyPlotWidget(QtWidgets.QWidget):\n def __init__(self):\n super().__init__()\n layout = QtWidgets.QVBoxLayout(self)\n layout.setContentsMargins(0, 0, 0, 0)\n self.canvas = FigureCanvas(figure.Figure(figsize=(5, 3)))\n layout.addWidget(self.canvas)\n self.setMinimumHeight(620)\n self.setMinimumWidth(560)\n\n self.ax = self.canvas.figure.subplots()\n self.ax.set_xlim([-1.0, 1.0])\n self.ax.set_ylim([-1.0, 1.0])\n self.ax.set_aspect(\"equal\")\n\n numPartPairs = len(BODY25.pairs)\n color_map = cm.get_cmap(\"hsv\", numPartPairs)\n self.pairLines = [\n lines.Line2D([], [], color=color_map(i)) for i in range(numPartPairs)\n ]\n\n handles = [\n lines.Line2D([0], [0], color=color_map(i), lw=1, ls=\"-\", label=l)\n for i, l in enumerate(\n [\n \"Torso\",\n \"Shoulder (right)\",\n \"Shoulder (left)\",\n \"Arm (right)\",\n \"Forearm (right)\",\n \"Arm (left)\",\n \"Forearm (left)\",\n \"Hip (right)\",\n \"Thigh (right)\",\n \"Leg (right)\",\n \"Hip (left)\",\n \"Thigh (left)\",\n \"Leg (left)\",\n \"Neck\",\n \"Eye (right)\",\n \"Ear (right)\",\n \"Eye (left)\",\n \"Ear (left)\",\n \"Foot (left)\",\n \"Toe (left)\",\n \"Heel (left)\",\n \"Foot (right)\",\n \"Toe (right)\",\n \"Heel (right)\",\n ]\n )\n ]\n self.ax.legend(\n handles=handles,\n loc=\"lower center\",\n bbox_to_anchor=(0.5, -0.3),\n prop={\"size\": 7},\n borderaxespad=1,\n ncol=4,\n )\n self.canvas.figure.subplots_adjust(bottom=0.2)\n\n for line in self.pairLines:\n self.ax.add_line(line)\n\n def plotBody(self, bodyKeypoints, accuracy: int):\n if self.isBodyData(bodyKeypoints):\n for i, line in enumerate(self.pairLines):\n keypoints_1, keypoints_2 = BODY25.pairs[i]\n if (\n bodyKeypoints[2][keypoints_1] == 0.0\n or bodyKeypoints[2][keypoints_2] == 0.0\n ):\n line.set_data([], [])\n else:\n line.set_data(\n [bodyKeypoints[0][keypoints_1], bodyKeypoints[0][keypoints_2]],\n [bodyKeypoints[1][keypoints_1], bodyKeypoints[1][keypoints_2]],\n )\n # line.set_data(list(data[i][0]), list(data[i][1]))\n\n self.ax.set_title(\n \"Accuracy: \" + str(accuracy), fontsize=12, color=\"#454545\"\n )\n else:\n self.clear()\n self.ax.set_title(\"\")\n self.canvas.draw()\n\n def clear(self):\n for line in self.pairLines:\n line.set_data([], [])\n self.canvas.draw()\n\n def isBodyData(self, keypoints):\n if type(keypoints) == np.ndarray:\n if keypoints.shape == (3, 25):\n return True\n return False\n\n\nclass BodyAnalysisWidget(QtWidgets.QGroupBox):\n stylesheet = \"\"\"\n #Large_Label {\n font-size: 26px;\n color: #9500ff;\n font-family: -apple-system,BlinkMacSystemFont,Segoe UI,Roboto,Oxygen,Ubuntu,Cantarell,Fira Sans,Droid Sans,Helvetica Neue,sans-serif;\n }\n\n QSplitter::handle {\n color: #cbcbcb;\n border: 1px solid #cbcbcb;\n border-radius: 2px;\n }\n \"\"\"\n\n def __init__(self):\n super().__init__((\"Full body\"))\n self.setStyleSheet(self.stylesheet)\n self.showInput = True\n self.classOutputs = []\n self.modelClassifier = None\n self.currentBodyModel = None\n self.currentPrediction = \"\"\n\n self.layout = QtWidgets.QVBoxLayout(self)\n self.setLayout(self.layout)\n\n self.predictionLabel = QtWidgets.QLabel(self)\n self.predictionLabel.setObjectName(\"Large_Label\")\n self.layout.addWidget(self.predictionLabel)\n self.predictionLabel.setAlignment(QtCore.Qt.AlignCenter)\n\n self.classGraphWidget = BarGraphWidget()\n self.bodyGraphWidget = BodyPlotWidget()\n\n self.graphSplitter = QtWidgets.QSplitter(QtCore.Qt.Horizontal)\n self.graphSplitter.setChildrenCollapsible(False)\n self.graphSplitter.addWidget(self.bodyGraphWidget)\n self.graphSplitter.addWidget(self.classGraphWidget)\n self.graphSplitter.setStretchFactor(0, 2)\n self.graphSplitter.setStretchFactor(1, 1)\n\n self.layout.addWidget(self.graphSplitter)\n self.layout.setStretch(0, 0)\n self.layout.setStretch(1, 1)\n\n def setClassifierModel(self, model, classOutputs): # model:tf.keras.models\n self.modelClassifier = model\n if model != None:\n self.modelInputShape = model.layers[0].input_shape[1:]\n if self.modelInputShape[0] == 25:\n self.currentBodyModel = BODY25\n elif self.modelInputShape[0] == 18:\n self.currentBodyModel = BODY18\n elif self.modelInputShape[0] == 50:\n self.currentBodyModel = BODY25_FLAT\n elif self.modelInputShape[0] == 36:\n self.currentBodyModel = BODY18_FLAT\n else:\n self.currentBodyModel = None\n else:\n self.modelInputShape = None\n self.currentBodyModel = None\n print(self.modelInputShape)\n self.classOutputs = classOutputs\n self.classGraphWidget.changeCategories(self.classOutputs)\n\n def drawBody(self, bodyKeypoints: np.ndarray, accuracy: float):\n \"\"\"Draw keypoints of a body pose in the widget if showInput==True.\n\n Args:\n keypoints (np.ndarray((3,25),float)): Coordinates x, y and the accuracy score for each 21 key points.\n accuracy (float): Global accuracy of detection of the pose.\n \"\"\"\n if self.showInput:\n # self.bodyGraphWidget.setTitle('Detection accuracy: ' + str(accuracy))\n self.updatePredictedClass(bodyKeypoints)\n self.bodyGraphWidget.plotBody(bodyKeypoints, accuracy)\n\n def updatePredictedClass(self, keypoints: np.ndarray):\n \"\"\"Draw keypoints of a body pose in the widget.\n\n Args:\n keypoints (np.ndarray((3,21),float)): Coordinates x, y and the accuracy score for each 21 key points.\n \"\"\"\n\n prediction = [0 for i in self.classOutputs]\n title = \"\"\n if type(keypoints) != type(None):\n if self.modelClassifier is not None:\n\n if self.currentBodyModel == BODY25:\n inputData = keypoints[:2].T\n elif self.currentBodyModel == BODY25_FLAT:\n inputData = np.concatenate(keypoints[:2].T, axis=0)\n elif self.currentBodyModel == BODY18:\n inputData = keypoints.T[BODY25_to_BODY18_indices][:, :2]\n elif self.currentBodyModel == BODY18_FLAT:\n inputData = np.concatenate(\n keypoints.T[BODY25_to_BODY18_indices][:, :2], axis=0\n )\n\n prediction = self.modelClassifier.predict(np.array([inputData]))[0]\n self.currentPrediction = self.classOutputs[np.argmax(prediction)]\n title = self.currentPrediction\n\n self.classGraphWidget.updateValues(np.array(prediction))\n self.setPredictionText(title)\n\n def newModelLoaded(self, urlModel: str, modelInfo: dict, bodyID: int):\n if TF_LOADED:\n if urlModel == \"None\":\n self.setClassifierModel(None, [])\n else:\n if bodyID == 2: # Check if classifier for body poses (not hands)\n model = tf.keras.models.load_model(urlModel)\n nbrClass = model.layers[-1].output_shape[1]\n if (\n modelInfo\n and modelInfo.get(\"labels\")\n and len(modelInfo.get(\"labels\")) == nbrClass\n ):\n classOutputs = modelInfo.get(\"labels\")\n else:\n classOutputs = [str(i) for i in range(1, nbrClass + 1)]\n self.setClassifierModel(model, classOutputs)\n\n def getCurrentPrediction(self) -> str:\n return self.currentPrediction\n\n def setPredictionText(self, prediction: str):\n self.predictionLabel.setText(prediction)\n\n\nclass BodyClassifierWidget(QtWidgets.QWidget):\n stylesheet = \"\"\"\n #Body_classifier {\n background-color: white;\n border-radius: 3px;\n font-family: -apple-system,BlinkMacSystemFont,Segoe UI,Roboto,Oxygen,Ubuntu,Cantarell,Fira Sans,Droid Sans,Helvetica Neue,sans-serif;\n }\n QGroupBox {\n font-size: 16px;\n font-family: -apple-system,BlinkMacSystemFont,Segoe UI,Roboto,Oxygen,Ubuntu,Cantarell,Fira Sans,Droid Sans,Helvetica Neue,sans-serif;\n }\n QLabel {\n font-size: 16px;\n font-family: -apple-system,BlinkMacSystemFont,Segoe UI,Roboto,Oxygen,Ubuntu,Cantarell,Fira Sans,Droid Sans,Helvetica Neue,sans-serif;\n }\n QPushButton {\n border: 1px solid #cbcbcb;\n border-radius: 2px;\n font-size: 16px;\n background: white;\n padding: 3px;\n }\n QComboBox {\n border: 1px solid #cbcbcb;\n border-radius: 3px;\n font-size: 16px;\n background: white;\n }\n QPushButton:hover {\n border-color: rgb(139, 173, 228);\n }\n QPushButton:pressed {\n background: #cbcbcb;\n }\n \"\"\"\n\n def __init__(self):\n super().__init__()\n ## Widget style\n self.setObjectName(\"Body_classifier\")\n self.setAttribute(QtCore.Qt.WA_StyledBackground, True)\n self.setStyleSheet(self.stylesheet)\n\n effect = QtWidgets.QGraphicsDropShadowEffect(self)\n effect.setBlurRadius(10)\n effect.setOffset(0, 0)\n effect.setColor(QtCore.Qt.gray)\n self.setGraphicsEffect(effect)\n\n ## Structure\n self.layout = QtWidgets.QVBoxLayout(self)\n self.setLayout(self.layout)\n\n self.classifierWidget = ClassifierSelectionWidget(\n parent=self, bodyClassification=True\n )\n self.bodyAnalysis = BodyAnalysisWidget()\n self.classifierWidget.newClassifierModel_Signal.connect(\n self.bodyAnalysis.newModelLoaded\n )\n\n self.layout.addWidget(self.bodyAnalysis)\n self.layout.addWidget(self.classifierWidget)\n self.layout.setStretch(0, 1)\n self.layout.setStretch(1, 0)\n"
},
{
"alpha_fraction": 0.6631021499633789,
"alphanum_fraction": 0.7320274114608765,
"avg_line_length": 49.18048858642578,
"blob_id": "36e6e75bfac3df28e86447bae26d50c7eb85a9f1",
"content_id": "0bc1046591e4bf9f087ca410db9e963fb8dc7bed",
"detected_licenses": [
"MIT"
],
"is_generated": false,
"is_vendor": false,
"language": "Markdown",
"length_bytes": 20693,
"license_type": "permissive",
"max_line_length": 1112,
"num_lines": 410,
"path": "/README.md",
"repo_name": "sokoloveav/pose-classification-kit",
"src_encoding": "UTF-8",
"text": "<h1 align = \"center\"> Pose Classification Kit </h1>\n\n[![PyPI][PyPI-shield]][PyPI-url]\n[![PyV][PyV-shield]][PyV-url]\n[![lint][lint-shield]][lint-url]\n[![linkedin][linkedin-shield]][linkedin-url]\n\n<p align=\"center\">\n <img src=\"https://github.com/ArthurFDLR/pose-classification-kit/blob/master/.github/markdown/pck-app.PNG?raw=true\" alt=\"Banner\" width=\"100%\" style=\"border-radius: 5px;\">\n</p>\n\nThis Python package focus on the deployment of gesture control systems. It ease dataset creation, models evaluation, and processing pipeline deployment. The critical element in the proposed processing architecture is the intermediate representation of human bodies as key points to perform efficient classification. In addition to the main application, the package contains two datasets for body/hands pose classificaiton, several classification models, and data augmentation tools that can be accessed through an API. Feel free to check-out the [**drone-gesture-control repository**](https://github.com/ArthurFDLR/drone-gesture-control) for a deployment example on Jetson Nano using this package.\n\n\n- [Getting Started](#getting-started)\n - [Step 1 - Install the package](#step-1---install-the-package)\n - [Using PyPi](#using-pypi)\n - [From source](#from-source)\n - [Step 2 - Install OpenPose](#step-2---install-openpose)\n - [Step 3 - Launch application](#step-3---launch-application)\n - [Step 4 - Create new classification models](#step-4---create-new-classification-models)\n- [Demonstrations](#demonstrations)\n- [User guide](#user-guide)\n - [Real-time pose classification](#real-time-pose-classification)\n - [Create and manipulate datasets](#create-and-manipulate-datasets)\n - [Additional scripts](#additional-scripts)\n- [Documentation](#documentation)\n - [Body datasets](#body-datasets)\n - [Data augmentation](#data-augmentation)\n- [License](#license)\n\n## Getting Started\n\n### Step 1 - Install the package\n\n#### Using PyPi\n\nRun the following command to install the whole package in the desired Python environment:\n \n ```\n pip install pose-classification-kit[app]\n ```\n\nIf you don't plan to use the application but just want access to the datasets and pre-trained models:\n\n ```\n pip install pose-classification-kit\n ```\n\n#### From source\n\nEnsure that [`Poetry`](https://poetry.eustace.io/) is installed for Python 3.7 and above on your system.\n\n1. Git clone the repository\n \n ```\n git clone https://github.com/ArthurFDLR/pose-classification-kit.git\n cd pose-classification-kit\n ```\n\n2. Create an adequate `venv` virtual environment\n \n ```\n python -m poetry install\n ```\n\n### Step 2 - Install OpenPose\n\nThe dataset creation and real-time model evaluation application heavily rely on the pose estimation system [**OpenPose**](https://github.com/CMU-Perceptual-Computing-Lab/openpose). It must be installed on your system to allow real-time gesture classification. This step is not requiered if you don't plan to use the application.\n\n1. Follow [OpenPose installation instructions](https://github.com/CMU-Perceptual-Computing-Lab/openpose/tree/master/doc/installation).\n\n2. Once the installation is completed, change the variable `OPENPOSE_PATH` ( [`.\\pose-classification-kit\\config.py`](https://github.com/ArthurFDLR/pose-classification-kit/blob/master/pose_classification_kit/config.py)) to the location of the OpenPose installation folder on your system.\n\n### Step 3 - Launch application\n\nYou should now be able to run the application if you installed all optionnal dependancies. See the usage section about how to use the app.\n```\npose-classification-app\n```\n\n### Step 4 - Create new classification models\n\nThe [`.\\examples`](https://github.com/ArthurFDLR/pose-classification-kit/blob/master/examples) folder contains Jupyter Notebook detailing the use of the API to create new classification models. Note that these Notebooks can be executed on Google Colab.\n\n## Demonstrations\n\n<a href=\"https://youtu.be/FK-1G749cIo\"><p align=\"center\">\n <img src=\"https://github.com/ArthurFDLR/pose-classification-kit/blob/master/.github/markdown/video_embed_1.PNG?raw=true\" alt=\"Demonstration video 1\" width=\"70%\" style=\"border-radius: 5px;\">\n</p></a>\n\n<a href=\"https://youtu.be/FZAUPmKiSXg\"><p align=\"center\">\n <img src=\"https://github.com/ArthurFDLR/pose-classification-kit/blob/master/.github/markdown/video_embed_2.PNG?raw=true\" alt=\"Demonstration video 2\" width=\"70%\" style=\"border-radius: 5px;\">\n</p></a>\n\n## User guide\n\n### Real-time pose classification\n\nThe video stream of the selected camera is fed to OpenPose at all times. The analysis results are displayed on the left side of the application. You have to choose one of the available models in the drop-down at the bottom of the analysis pannel. Keypoints extracted from the video by OpenPose are automatically normalized and fed to the classifier.\n\n### Create and manipulate datasets\n\nFirst, you either have to load or create a new set of samples for a specific label and hand side. To do so, respectively choose *Open (Ctrl+O)* or *Create new (Ctrl+N)* in *Dataset* of the menu bar. You have to specify the hand side, the label, and the newly created samples set' accuracy threshold. A configuration window will ask for the label and the newly created samples set's accuracy threshold in case of creating a new class. The accuracy threshold defines the minimum accuracy of hand keypoints detection from OpenPose of any sample in the set. This accuracy is displayed on top of the keypoints graph.\n\nNow that a set is loaded in the application, you can record new samples from your video feed or inspect the set and delete inadequate samples. When your done, save the set through *Dataset -> Save (Ctrl+S)*.\n\n### Additional scripts\n\nSome functionalities are currently unavailable through the GUI:\n- You can export all dataset samples from [`.\\pose_classification_kit\\datasets\\Body`](https://github.com/ArthurFDLR/pose-classification-kit/tree/master/pose_classification_kit/datasets/Body) and [`.\\pose_classification_kit\\datasets\\Hands`](https://github.com/ArthurFDLR/pose-classification-kit/tree/master/pose_classification_kit/datasets/Hands) in two respective CSV files. \n ```\n export-datasets\n ```\n- You can generate videos similar to [this one](https://youtu.be/FK-1G749cIo) ([`.\\pose-classification-kit\\scripts\\video_creation.py`](https://github.com/ArthurFDLR/OpenHand-App/tree/master/pose-classification-kit/scripts/video_creation.py) might need some modification to fit your use case).\n \n 🚧 Currently not functional 🚧\n \n ```\n video-overlay\n ```\n\n## Documentation\n\n### Body datasets\n\nThere is a total of 20 body dataset classes which contains between 500 and $600$ samples each for a total of 10680 entries. Even if the number of samples from one class to the other varies in the raw dataset, the API yields a balanced dataset of 503 samples per class. Also, by default, 20% of these are reserved for final testing of the model. Each entry in the dataset is an array of 25 2D coordinates. The mapping of these keypoints follows the BODY25 body model. We created the dataset using the BODY25 representation as it is one of the most comprehensive standard body models. However, some pose estimation models, such as the one used on the Jetson Nano, use an 18 keypoints representation (BODY18). The seven missing keypoints do not strongly influence classification as 6 of them are used for feet representation, and the last one is a central hip keypoint. Still, the dataset must be converted to the BODY18 representation. This is done by reindexing the samples based on the comparison of the mapping of both body models. You can choose which body model to use when importing the dataset with the API.\n\n<p align=\"center\">\n <img src=\"https://github.com/ArthurFDLR/pose-classification-kit/blob/master/.github/markdown/class_body.png?raw=true\" alt=\"Full body classes\" width=\"80%\" style=\"border-radius: 5px;\">\n</p>\n\n<p align=\"center\">\n <img src=\"https://github.com/ArthurFDLR/pose-classification-kit/blob/master/.github/markdown/body_models.png?raw=true\" alt=\"Body models\" width=\"80%\" style=\"border-radius: 5px;\">\n</p>\n\n### Data augmentation\n\nThe data augmentation tool currently support the following operations:\n\n- **Scaling**: a random scaling factor drawn from a normal distribution of mean 0 and standard deviation σₛ is applied to all sample coordinates.\n- **Rotation**: a rotation of an angle randomly drawn from a normal distribution of mean 0 and standard deviation σᵣ is applied to the sample.\n- **Noise**: Gaussian noise of standard deviation σₙ is added to coordinates of the sample.\n- **Remove keypoints**: a pre-defined or random list of keypoints are removed (coordinates set to 0) from the sample.\n\n<details><summary>See example</summary>\n<p>\n\n\n<table>\n <tr>\n <td>Augmentation Ratio</td>\n <td>σₛ</td>\n <td>σᵣ</td>\n <td>σₙ</td>\n <td>Remove keypoints</td>\n </tr>\n <tr>\n <td>10%</td>\n <td>0.08</td>\n <td>0.0</td>\n <td>0.0</td>\n <td>None</td>\n </tr>\n <tr>\n <td>10%</td>\n <td>0.0</td>\n <td>10.0</td>\n <td>0.0</td>\n <td>None</td>\n </tr>\n <tr>\n <td>15%</td>\n <td>0.0</td>\n <td>0.0</td>\n <td>0.03</td>\n <td>Legs</td>\n </tr>\n <tr>\n <td>15%</td>\n <td>0.0</td>\n <td>0.0</td>\n <td>0.03</td>\n <td>Legs & Hip</td>\n </tr>\n <tr>\n <td>20%</td>\n <td>0.0</td>\n <td>0.0</td>\n <td>0.03</td>\n <td>2 random</td>\n </tr>\n</table>\n\n```python\nfrom pose_classification_kit.datasets import BODY18, bodyDataset, dataAugmentation\n\ndataset = bodyDataset(testSplit=.2, shuffle=True, bodyModel=BODY18)\nx_train = dataset['x_train']\ny_train = dataset['y_train_onehot']\nx, y = [x_train], [y_train]\n\n# Scaling augmentation\nx[len(x):],y[len(y):] = tuple(zip(dataAugmentation(\n x_train, y_train,\n augmentation_ratio=.1,\n scaling_factor_standard_deviation=.08,\n)))\n\n# Rotation augmentation\nx[len(x):],y[len(y):] = tuple(zip(dataAugmentation(\n x_train, y_train,\n augmentation_ratio=.1,\n rotation_angle_standard_deviation=10,\n)))\n\n# Upper-body augmentation\nlowerBody_keypoints = np.where(np.isin(BODY18.mapping,[\n \"left_knee\", \"right_knee\", \"left_ankle\", \"right_ankle\"\n]))[0]\nx[len(x):],y[len(y):] = tuple(zip(dataAugmentation(\n x_train, y_train,\n augmentation_ratio=.15,\n remove_specific_keypoints=lowerBody_keypoints,\n random_noise_standard_deviation=.03\n)))\nlowerBody_keypoints = np.where(np.isin(BODY18.mapping,[\n \"left_knee\", \"right_knee\", \"left_ankle\", \"right_ankle\", \"left_hip\", \"right_hip\",\n]))[0]\nx[len(x):],y[len(y):] = tuple(zip(dataAugmentation(\n x_train, y_train,\n augmentation_ratio=.15,\n remove_specific_keypoints=lowerBody_keypoints,\n random_noise_standard_deviation=.03\n))) \n\n# Random partial input augmentation\nx[len(x):],y[len(y):] = tuple(zip(dataAugmentation(\n x_train, y_train,\n augmentation_ratio=.2,\n remove_rand_keypoints_nbr=2,\n random_noise_standard_deviation=.03\n)))\n\nx_train_augmented = np.concatenate(x, axis=0)\ny_train_augmented = np.concatenate(y, axis=0)\n```\n</p>\n</details>\n\n\n<!-- LICENSE -->\n## License\n\nDistributed under the MIT License. See [`LICENSE`](https://github.com/ArthurFDLR/pose-classification-kit/blob/main/LICENSE) for more information.\n\n\n<!-- MARKDOWN LINKS & IMAGES -->\n[PyPI-shield]: https://img.shields.io/pypi/v/pose-classification-kit?style=for-the-badge\n[PyPI-url]: https://pypi.org/project/pose-classification-kit/\n\n[PyV-shield]: https://img.shields.io/badge/python-3.7%20%7C%203.8%20%7C%203.9-blue?style=for-the-badge\n[PyV-url]: https://github.com/ArthurFDLR/pose-classification-kit/blob/master/pyproject.toml\n\n[lint-shield]: https://img.shields.io/badge/code%20style-black-000000.svg?style=for-the-badge\n[lint-url]: https://github.com/psf/black\n\n[license-shield]: https://img.shields.io/github/license/ArthurFDLR/OpenHand-Classifier?style=for-the-badge\n[license-url]: https://github.com/ArthurFDLR/OpenHand-Classifier/blob/master/LICENSE\n\n[linkedin-shield]: https://img.shields.io/badge/-LinkedIn-black.svg?style=for-the-badge&logo=linkedin&colorB=555\n[linkedin-url]: https://linkedin.com/in/arthurfdlr/\n\n<!--\n## Under the hood\n\n### Features extraction\n\nThe 21 hand keypoints (2D) used as input for this classifier are produced by OpenPose. The hand output format is as follow:\n\n<img src=\"https://raw.githubusercontent.com/ArthurFDLR/pose-classification-kit/master/.github/markdown/keypoints_hand.png\" width=\"200\">\n\nMore information are available [here](https://github.com/CMU-Perceptual-Computing-Lab/openpose/blob/master/doc/02_output.md). Please note that even though OpenHand focuses on hand keypoints, OpenPose requires the whole body to be analyzed to generate hand data. Furthermore, keypoints coordinates are given in the frame of reference of the image fed to OpenPose. Thus, the coordinates have to be normalized.\nI addition to x, y coordinates, the accuracy of detection of each key points is provided.\n\n### Keypoints normalization\n\nOpenPose outputs have to be formatted and normalized before classification analysis. Coordinates are normalized relative to finger length and the center of gravity of the hand.\n\n* **Scaling:** First, the length of each fingers - defined as a set of lines of the same color, see above - is calculated. The euclidian distances of all segments of a finger are sumed *- e.g.* <img src=\"https://render.githubusercontent.com/render/math?math=Thumb\\_length = \\sum_{i=0}^{3} d(\\boldsymbol{k_i}, \\boldsymbol{k_{i%2B1}})\">.\nThen, every coordinates composing the hand are divided by the greater finger length.\n\n* **Centering:** Keypoints are centered relative to the center of mass of the hand which, in this case, is simply defined as <img src=\"https://render.githubusercontent.com/render/math?math=(\\bar{\\boldsymbol{k^x}}, \\bar{\\boldsymbol{k^y}})\">.\n\n<details><summary>Click to show code</summary>\n<p>\n\n```python\nhandKeypoints = np.array(op.Datum().handKeypoints)[handID, self.personID]\n\nlengthFingers = [np.sqrt((handKeypoints[0,0] - handKeypoints[i,0])**2 + (handKeypoints[0,1] - handKeypoints[i,1])**2) for i in [1,5,9,13,17]] #Initialized with the length of the first segment of each fingers.\nfor i in range(3): #Add length of other segments for each fingers\n for j in range(len(lengthFingers)):\n lengthFingers[j] += np.sqrt((handKeypoints[1+j*4+i+1, 0] - handKeypoints[1+j*4+i, 0])**2 + (handKeypoints[1+j*4+i+1, 1] - handKeypoints[1+j*4+i, 1])**2)\nnormMax = max(lengthFingers)\n\nhandCenterX = handKeypoints.T[0].sum() / handKeypoints.shape[0]\nhandCenterY = handKeypoints.T[1].sum() / handKeypoints.shape[0]\noutputArray = np.array([(handKeypoints.T[0] - handCenterX)/normMax,\n -(handKeypoints.T[1] - handCenterY)/normMax,\n (handKeypoints.T[2])])\n```\n</p>\n</details>\n\n<img src=\"https://raw.githubusercontent.com/ArthurFDLR/pose-classification-kit/master/.github/markdown/formated_hand.png\" width=\"400\">\n\n### Dataset creation - [*11090 samples for 27 categories*](https://github.com/ArthurFDLR/OpenHand-Classifier/tree/master/Datasets)\n\nThe dataset is composed of several classes consisting of two text files, one for each hand. The dataset is structured as follow:\n\n```\n.\\AppHandClassifier\\Datasets\n│\n└───class_label_1\n│ └───left_hand\n│ │ data.txt\n│ └───right_hand\n│ data.txt\n│\n└───class_label_2\n│ └───left_hand\n│ │ data.txt\n│ └───right_hand\n│ data.txt\n.\n.\n```\n\nThe first line of a *data.txt* file contains the set's characteristics:\n- Class label\n- Hand identifier (0 for the left hand, 1 for the right hand)\n- The minimum accuracy of detection\n\nTo add comments, begin a line with *##*. A sample is (at least) composed of 3 lines: a header giving the detection accuracy, x coordinates, y coordinates. \n\n<details><summary>Click to show examples - First lines of 'Super' set for right hand</summary>\n<p>\n\n```\nSuper,1,13.0\n## Data generated the 2020-07-28 labeled Super (right hand) with a global accuracy higher than 13.0, based on OpenPose estimation.\n## Data format: Coordinates x, y, and accuracy of estimation a\n\n#14.064389\nx:-0.47471642 -0.38345036 -0.27814367 -0.17283674 -0.16581643 -0.07455035 0.24136995 0.26243138 0.18520646 -0.060509484 0.24136995 0.17116559 0.05883807 -0.095611796 0.22732908 0.14308357 0.030756325 -0.10965267 0.1220224 0.10798126 0.02373602\ny:-0.120350584 0.12536536 0.38512218 0.6238177 0.8203904 0.13238579 0.12536536 0.097283475 0.09026304 -0.07822783 -0.043125518 -0.029084647 -0.015043774 -0.2467187 -0.19757552 -0.16247334 -0.14843246 -0.3801074 -0.36606652 -0.30990276 -0.30288246\na:0.4513099 0.52159405 0.73779285 0.7362725 0.8151489 0.8092662 0.74224406 0.4387765 0.23850155 0.797209 0.79372936 0.59578335 0.44275257 0.81076413 0.9635796 0.647649 0.5396069 0.80517197 0.8936012 0.7543843 0.52925146\n\n#15.550782\nx:-0.4933955 -0.3817585 -0.23523489 -0.109643176 -0.053824674 0.008971046 0.23224507 0.13456275 0.043857645 0.001993833 0.24619977 0.13456275 0.015948527 -0.025915554 0.22526786 0.113630846 0.001993833 -0.053824674 0.12060806 0.07874425 -0.0049836473\ny:-0.113298275 0.13090765 0.36813638 0.5914105 0.779798 0.109975755 0.102998406 0.137885 0.14486235 -0.07841181 -0.06445711 -0.0225933 -0.015615954 -0.23888998 -0.19702616 -0.16213956 -0.16911678 -0.3575045 -0.350527 -0.30168596 -0.2947085\na:0.59823513 0.6402868 0.81965464 0.87657 0.9046949 0.83729064 0.8742925 0.47936943 0.43094704 0.82496655 0.87384015 0.65166384 0.5838103 0.8670102 0.9759184 0.6943432 0.5715823 0.81283325 0.8954963 0.71702033 0.62095624\n```\n\n</p>\n</details>\n\nNote that a training set of 150 samples per hand and per pose seems enough to yield good classification results. A couple of minutes of recording with the provided tool is enough to generate enough data for a pose.\n\n### Pose classifier models\n\nClassification models available in the application are stored in [`.\\Models`](https://github.com/ArthurFDLR/OpenHand-App/tree/master/Models). Each model sub-folder contains two HDF5 files containing the model's architecture and weights values. While both models usually share the same architecture, they are trained to analyze the right or the left hand. Besides, a text file `class.txt` provides labels associated with the classifiers' one-hot encoded output.\n\n```\n.\\AppHandClassifier\\Models\n│\n└───model_1\n| class.txt\n│ model_1_left.h5\n| model_1_right.h5\n│\n└───model_2\n| class.txt\n│ model_2_left.h5\n| model_2_right.h5\n.\n.\n```\n\nSee [**OpenHand-Models** repository](https://github.com/ArthurFDLR/OpenHand-Models) for more details about model creation.\n\n## User guide\n\n### Real-time pose classification\n\nThe video feed of the selected camera is fed to OpenPose at all times. The analysis results are displayed on the left side of the application. You have to choose one of the available models in the drop-down at the bottom of the hand-analysis window. Hand keypoints extracted from the video feed by OpenPose are automatically normalized and fed to the classifier.\n\n### Create and manipulate datasets\n\nFirst, you either have to load or create a new set of samples for a specific label and hand side. To do so, respectively choose *Open (Ctrl+O)* or *Create new (Ctrl+N)* in *Dataset* of the menu bar. You have to specify the hand side, the label, and the newly created samples set' accuracy threshold. The accuracy threshold defines the minimum accuracy of hand keypoints detection from OpenPose of any sample in the set. This accuracy is displayed on top of hand keypoints graphs.\n\nNow that a set is loaded in the application, you can record new samples from your video feed or inspect the set and delete inadequate samples. When your done, save the set through *Dataset -> Save (Ctrl+S)*.\n\n### Additional scripts\n\nSome functionalities are currently unavailable through the GUI:\n- You can export all dataset samples from [`.\\Dataset`](https://github.com/ArthurFDLR/OpenHand-App/tree/master/Dataset) in a single CSV file - `make dataset-csv` (or `poetry run python .\\pose-classification-kit\\scripts\\dataset_export.py`)\n- You can generate videos similar to [this one](https://youtu.be/FK-1G749cIo) ([`.\\pose-classification-kit\\scripts\\video_creation.py`](https://github.com/ArthurFDLR/OpenHand-App/tree/master/pose-classification-kit/scripts/video_creation.py) might need some modification to fit your use case) - `make video-overlay` (or `poetry run python .\\pose-classification-kit\\scripts\\video_creation.py`)\n-->"
},
{
"alpha_fraction": 0.4970930218696594,
"alphanum_fraction": 0.5052854418754578,
"avg_line_length": 26.420289993286133,
"blob_id": "6306ac1467e0355d444e76f54306de4db4ef6d47",
"content_id": "66d46d6cf3cd0fabfbc8e1eb068234fe577cfda9",
"detected_licenses": [
"MIT"
],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 3784,
"license_type": "permissive",
"max_line_length": 88,
"num_lines": 138,
"path": "/pose_classification_kit/scripts/dataset_export.py",
"repo_name": "sokoloveav/pose-classification-kit",
"src_encoding": "UTF-8",
"text": "import json\nimport numpy as np\nimport pandas as pd\nfrom pathlib import Path\nfrom ..config import DATASETS_PATH\n\n\ndef loadFile(filePath: Path, shuffle: bool = True):\n data_out = []\n accuracy_out = []\n with open(filePath) as f:\n data = json.load(f)\n for entry in data[\"data\"]:\n data_out.append([entry[\"x\"], entry[\"y\"], entry[\"a\"]])\n accuracy_out.append(entry[\"detection_accuracy\"])\n\n if shuffle:\n index = np.arange(data_out.shape[0])\n np.random.shuffle(index)\n data_out = data_out[index]\n accuracy_out = accuracy_out[index]\n\n return np.array(data_out), np.array(accuracy_out)\n\n\ndef generateHandDataset(labels, dataset_path):\n data = {\"label\": [], \"hand\": [], \"accuracy\": []}\n for i in range(21):\n data.update({\"x{}\".format(i): [], \"y{}\".format(i): []})\n\n for label in labels:\n for hand in [0, 1]:\n fileName = label + [\"_left\", \"_right\"][hand] + \"_hand.json\"\n filePath = dataset_path / fileName\n\n if filePath.is_file():\n list_data, list_accuracy = loadFile(filePath, False)\n data[\"label\"] += [label] * list_data.shape[0]\n data[\"hand\"] += [\"left\" if hand == 0 else \"right\"] * list_data.shape[0]\n data[\"accuracy\"] += list(list_accuracy)\n\n for i in range(21):\n data[\"x{}\".format(i)] += list(list_data[:, 0, i])\n data[\"y{}\".format(i)] += list(list_data[:, 1, i])\n\n print(fileName, \"imported\")\n else:\n print(fileName, \"not found\")\n return data\n\n\ndef generateBodyDataset(labels, dataset_path):\n data = {\"label\": [], \"accuracy\": []}\n for i in range(25):\n data.update({\"x{}\".format(i): [], \"y{}\".format(i): []})\n\n for label in labels:\n fileName = label + \"_body.json\"\n filePath = dataset_path / fileName\n\n if filePath.is_file():\n list_data, list_accuracy = loadFile(filePath, False)\n data[\"label\"] += [label] * list_data.shape[0]\n data[\"accuracy\"] += list(list_accuracy)\n\n for i in range(25):\n data[\"x{}\".format(i)] += list(list_data[:, 0, i])\n data[\"y{}\".format(i)] += list(list_data[:, 1, i])\n\n print(fileName, \"imported\")\n else:\n print(fileName, \"not found\")\n return data\n\n\ndef run():\n handLabels = [\n \"0\",\n \"1\",\n \"2\",\n \"3\",\n \"4\",\n \"5\",\n \"6\",\n \"7\",\n \"8\",\n \"9\",\n \"Chef\",\n \"Help\",\n \"Super\",\n \"VIP\",\n \"Water\",\n \"Metal\",\n \"Dislike\",\n \"Loser\",\n \"Phone\",\n \"Shaka\",\n \"Stop\",\n \"Vulcan_Salute\",\n \"Power_Fist\",\n \"Horns\",\n \"Fight_Fist\",\n \"Middle_Finger\",\n \"Ok\",\n ]\n\n bodyLabels = [\n \"Seated\",\n \"Stand\",\n \"Stand_RightArmRaised\",\n \"Stand_LeftArmRaised\",\n \"T\",\n \"MilitarySalute\",\n \"PushUp_Low\",\n \"Squat\",\n \"Plank\",\n \"Yoga_Tree_left\",\n \"Yoga_Tree_right\",\n \"Yoga_UpwardSalute\",\n \"Yoga_Warrior2_left\",\n \"Yoga_Warrior2_right\",\n \"Traffic_AllStop\",\n \"Traffic_BackStop\",\n \"Traffic_FrontStop\",\n \"Traffic_BackFrontStop\",\n \"Traffic_LeftTurn\",\n \"Traffic_RightTurn\",\n ]\n\n datasetHands = pd.DataFrame(generateBodyDataset(bodyLabels, DATASETS_PATH / \"Body\"))\n datasetHands.to_csv(DATASETS_PATH / \"BodyPose_Dataset.csv\", index=False)\n\n datasetBody = pd.DataFrame(generateHandDataset(handLabels, DATASETS_PATH / \"Hands\"))\n datasetBody.to_csv(DATASETS_PATH / \"HandPose_Dataset.csv\", index=False)\n\n\nif __name__ == \"__main__\":\n run()\n"
},
{
"alpha_fraction": 0.6594488024711609,
"alphanum_fraction": 0.686515748500824,
"avg_line_length": 36.62963104248047,
"blob_id": "03c97b3e839f1395ec9330dfef1cbf3a37499057",
"content_id": "cad9cd84d1a6eeab58c72144c9ca1887820b9a80",
"detected_licenses": [
"MIT"
],
"is_generated": false,
"is_vendor": false,
"language": "TOML",
"length_bytes": 2032,
"license_type": "permissive",
"max_line_length": 149,
"num_lines": 54,
"path": "/pyproject.toml",
"repo_name": "sokoloveav/pose-classification-kit",
"src_encoding": "UTF-8",
"text": "[tool.poetry]\nname = \"pose-classification-kit\"\nversion = \"1.1.5\"\ndescription = \"From pose estimation to pose classification - Creation of datasets & real-time visualization\"\nauthors = [\"ArthurFDLR <[email protected]>\"]\nkeywords = [\"pose-classification\", \"OpenPose\", \"pose-estimation\", \"machine-learning\", \"deep-learning\", \"keypoints\", \"keypoints-detection\", \"gesture\"]\nreadme = \"README.md\"\nlicense = \"MIT\"\nhomepage = \"https://github.com/ArthurFDLR/pose-classification-kit\"\nrepository = \"https://github.com/ArthurFDLR/pose-classification-kit\"\ninclude = [\n \"LICENSE\", \"README.md\"\n]\nclassifiers = [\n \"Development Status :: 5 - Production/Stable\",\n \"Environment :: X11 Applications :: Qt\",\n \"Intended Audience :: End Users/Desktop\",\n \"Intended Audience :: Science/Research\",\n \"Topic :: Scientific/Engineering :: Artificial Intelligence\",\n \"Topic :: Scientific/Engineering :: Image Recognition\",\n \"License :: OSI Approved :: MIT License\",\n \"Programming Language :: Python :: 3\",\n \"Programming Language :: Python :: 3.6\",\n \"Programming Language :: Python :: 3.7\",\n \"Programming Language :: Python :: 3.8\",\n \"Programming Language :: Python :: 3.9\",\n \"Programming Language :: Python :: 3 :: Only\",\n]\n\n[tool.poetry.dependencies]\npython = \">=3.6.2, <4.0\"\ntensorflow = \"*\"\nnumpy = \"~1.19.2\" # tensorflow requirement\npandas = \"^1.1.5\"\nopencv-python = {version = \"^4.4.0\", optional = true}\nmatplotlib = {version = \"^3.3.2\", optional = true}\nPyQt5 = {version = \"^5.15.4\", optional = true}\nqimage2ndarray = {version = \"^1.8.3\", optional = true}\n\n[tool.poetry.extras]\napp = [\"PyQt5\", \"matplotlib\", \"opencv-python\", \"qimage2ndarray\"]\n\n[tool.poetry.dev-dependencies]\npytest = \"^5.2\"\nblack = \"^20.8b1\"\n\n[tool.poetry.scripts]\nexport-datasets = \"pose_classification_kit.scripts.dataset_export:run\"\npose-classification-app = \"pose_classification_kit.app:run\"\nvideo-overlay = \"pose_classification_kit.scripts.video_creation:run\"\n\n[build-system]\nrequires = [\"poetry-core>=1.0.0a5\"]\nbuild-backend = \"poetry.core.masonry.api\"\n"
},
{
"alpha_fraction": 0.5596416592597961,
"alphanum_fraction": 0.5683639645576477,
"avg_line_length": 39.400001525878906,
"blob_id": "6c6e4cd88ecd5a9d719ca3fb89d21b864a2f1b2d",
"content_id": "73e31db40538808412976822e73c298a3c153386",
"detected_licenses": [
"MIT"
],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 4242,
"license_type": "permissive",
"max_line_length": 133,
"num_lines": 105,
"path": "/pose_classification_kit/src/keypoints_analysis/classifier_selection_widget.py",
"repo_name": "sokoloveav/pose-classification-kit",
"src_encoding": "UTF-8",
"text": "from pose_classification_kit.datasets.body_models import BODY18\nfrom ..imports.qt import QtWidgets, QtCore, pyqtSignal\nfrom ...config import MODELS_PATH\n\nimport json\n\n\nclass ClassifierSelectionWidget(QtWidgets.QWidget):\n # newClassifierModel_Signal: url to load classifier model, model infos from JSON, handID\n newClassifierModel_Signal = pyqtSignal(str, object, int)\n\n def __init__(self, parent=None, bodyClassification: bool = False):\n super().__init__()\n self.parent = parent\n self.modelRight = None\n self.modelLeft = None\n self.modelsPath = MODELS_PATH / (\"Body\" if bodyClassification else \"Hands\")\n self.bodyClassification = bodyClassification\n\n self.leftWidget = QtWidgets.QWidget()\n self.layout = QtWidgets.QGridLayout(self)\n self.setLayout(self.layout)\n self.layout.setContentsMargins(0, 0, 0, 0)\n classifierLabel = QtWidgets.QLabel(\"Classifier:\")\n classifierLabel.setSizePolicy(\n QtWidgets.QSizePolicy.Minimum, QtWidgets.QSizePolicy.Minimum\n )\n self.layout.addWidget(classifierLabel, 1, 0, 1, 1)\n\n self.classifierSelector = QtWidgets.QComboBox(\n cursor=QtCore.Qt.PointingHandCursor\n )\n self.classifierSelector.addItems(self.getAvailableClassifiers())\n self.layout.addWidget(self.classifierSelector, 1, 1, 1, 1)\n self.classifierSelector.currentTextChanged.connect(self.loadModel)\n\n updateClassifierButton = QtWidgets.QPushButton(\n \"Update list\",\n cursor=QtCore.Qt.PointingHandCursor,\n toolTip=\"Refresh list of model available in associated folder\",\n )\n updateClassifierButton.clicked.connect(self.updateClassifier)\n self.layout.addWidget(updateClassifierButton, 1, 2, 1, 1)\n\n def loadModel(self, name: str):\n \"\"\"Load full (structures + weigths) h5 model.\n\n Args:\n name (string): Name of the model. The folder .\\models\\name must contain: modelName_right.h5, modelName_left.h5, class.txt\n \"\"\"\n if name != \"None\":\n pathFolder = self.modelsPath / name\n if pathFolder.is_dir():\n\n ModelInfoPath = next(pathFolder.glob(\"*.json\"), None)\n modelInfo = None\n if ModelInfoPath:\n with open(ModelInfoPath, \"r\") as f:\n try:\n modelInfo = json.load(f)\n except:\n modelInfo = None\n\n if self.bodyClassification:\n availableModelPath = next(pathFolder.glob(\"*.h5\"), None)\n if availableModelPath:\n self.newClassifierModel_Signal.emit(\n str(availableModelPath), modelInfo, 2\n )\n else:\n self.newClassifierModel_Signal.emit(\"None\", {}, 2)\n\n else:\n availableModels = list(pathFolder.glob(\"*_right.h5\"))\n if len(availableModels) > 0:\n self.newClassifierModel_Signal.emit(\n str(availableModels[0]), modelInfo, 1\n )\n else:\n self.newClassifierModel_Signal.emit(\"None\", {}, 1)\n\n availableModels = list(pathFolder.glob(\"*_left.h5\"))\n if len(availableModels) > 0:\n self.newClassifierModel_Signal.emit(\n str(availableModels[0]), modelInfo, 0\n )\n else:\n self.newClassifierModel_Signal.emit(\"None\", {}, 0)\n\n else:\n self.newClassifierModel_Signal.emit(\"None\", {}, -1)\n\n def getAvailableClassifiers(self):\n listOut = [\"None\"]\n # Get all directory that contains an h5 file.\n listOut += [\n x.stem\n for x in self.modelsPath.glob(\"*\")\n if x.is_dir() and next(x.glob(\"*.h5\"), None)\n ]\n return listOut\n\n def updateClassifier(self):\n self.classifierSelector.clear()\n self.classifierSelector.addItems(self.getAvailableClassifiers())\n"
},
{
"alpha_fraction": 0.6334269642829895,
"alphanum_fraction": 0.6558988690376282,
"avg_line_length": 21.967741012573242,
"blob_id": "adcf0b2604544067b1b421353fd67e0c31797e23",
"content_id": "7b30d8119d8612b554cf74f6482a75bd00b8013a",
"detected_licenses": [
"MIT"
],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 712,
"license_type": "permissive",
"max_line_length": 92,
"num_lines": 31,
"path": "/pose_classification_kit/src/imports/qt.py",
"repo_name": "sokoloveav/pose-classification-kit",
"src_encoding": "UTF-8",
"text": "import sys\n\nPYSIDE2_LOADED = False\nPYQT5_LOADED = False\n\nif not PYSIDE2_LOADED:\n try:\n from PyQt5 import QtGui, QtWidgets, QtCore, QtMultimedia\n from PyQt5.QtCore import pyqtSignal, pyqtSlot\n\n PYQT5_LOADED = True\n print(\"Use PyQt5\")\n except:\n pass\n\nif not PYQT5_LOADED:\n try:\n from PySide2 import QtGui, QtWidgets, QtCore, QtMultimedia\n from PySide2.QtCore import Signal as pyqtSignal, Slot as pyqtSlot\n\n PYSIDE2_LOADED = True\n print(\"Use PySide2\")\n except:\n pass\n\nx = 50\n\nif not PYQT5_LOADED and not PYSIDE2_LOADED:\n sys.exit(\n \"Missing application dependancies, try:\\n\\tpip install pose-classification-kit[app]\"\n )\n"
},
{
"alpha_fraction": 0.5323755145072937,
"alphanum_fraction": 0.5478109121322632,
"avg_line_length": 32.336570739746094,
"blob_id": "38dc58a706fc68ccc63df344a72e4a9fafd20585",
"content_id": "ba84c06d44a95662bf14607db65bfae8f06adea7",
"detected_licenses": [
"MIT"
],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 10301,
"license_type": "permissive",
"max_line_length": 97,
"num_lines": 309,
"path": "/pose_classification_kit/scripts/video_creation.py",
"repo_name": "sokoloveav/pose-classification-kit",
"src_encoding": "UTF-8",
"text": "## WORK IN PROGRESS\n## CURRENTLY NOT WORKING\n\nimport numpy as np\nimport cv2\nfrom pathlib import Path\nimport sys\nimport os\nimport matplotlib.pyplot as plt\nimport matplotlib\n\nfont = {\n \"family\": \"serif\",\n \"weight\": \"normal\",\n \"size\": \"22\",\n \"serif\": \"DejaVu Sans\",\n}\nmatplotlib.rc(\"font\", **font)\n\nOPENPOSE_PATH = Path(\"C:/\") / \"Program files\" / \"OpenPose\"\n\ntry:\n sys.path.append(str(OPENPOSE_PATH / \"build\" / \"python\" / \"openpose\" / \"Release\"))\n releasePATH = OPENPOSE_PATH / \"build\" / \"x64\" / \"Release\"\n binPATH = OPENPOSE_PATH / \"build\" / \"bin\"\n modelsPATH = OPENPOSE_PATH / \"models\"\n os.environ[\"PATH\"] = (\n os.environ[\"PATH\"] + \";\" + str(releasePATH) + \";\" + str(binPATH) + \";\"\n )\n import pyopenpose as op\n\n OPENPOSE_LOADED = True\n print(\"OpenPose ({}) loaded.\".format(str(OPENPOSE_PATH)))\nexcept:\n OPENPOSE_LOADED = False\n print(\"OpenPose ({}) loading failed.\".format(str(OPENPOSE_PATH)))\n\ntry:\n import tensorflow as tf\n\n GPU_LIST = tf.config.experimental.list_physical_devices(\"GPU\")\n if GPU_LIST:\n try:\n # Currently, memory growth needs to be the same across GPUs\n for gpu in GPU_LIST:\n # Prevent Tensorflow to take all GPU memory\n tf.config.experimental.set_memory_growth(gpu, True)\n logical_gpus = tf.config.experimental.list_logical_devices(\"GPU\")\n print(\n len(GPU_LIST), \"Physical GPUs,\", len(logical_gpus), \"Logical GPUs\"\n )\n except RuntimeError as e:\n # Memory growth must be set before GPUs have been initialized\n print(e)\n TF_LOADED = True\nexcept:\n TF_LOADED = False\n\n\ndef format_data(handKeypoints, hand_id: int):\n \"\"\"Return the key points of the hand seen in the image (cf. videoSource).\n\n Args:\n hand_id (int): 0 -> Left hand | 1 -> Right hand\n\n Returns:\n np.ndarray((3,21),float): Coordinates x, y and the accuracy score for each 21 key points.\n None if the given hand is not detected.\n \"\"\"\n openhand_format = None\n personID = 0\n\n nbrPersonDetected = handKeypoints.shape[1] if handKeypoints.ndim > 2 else 0\n handAccuaracyScore = 0.0\n if nbrPersonDetected > 0:\n handAccuaracyScore = handKeypoints[hand_id, personID].T[2].sum()\n handDetected = handAccuaracyScore > 1.0\n if handDetected:\n handKeypoints = handKeypoints[hand_id, personID]\n # Initialize with the length of the first segment of each fingers\n lengthFingers = [\n np.sqrt(\n (handKeypoints[0, 0] - handKeypoints[i, 0]) ** 2\n + (handKeypoints[0, 1] - handKeypoints[i, 1]) ** 2\n )\n for i in [1, 5, 9, 13, 17]\n ]\n for i in range(3): # Add length of other segments of each fingers\n for j in range(len(lengthFingers)):\n x = (\n handKeypoints[1 + j * 4 + i + 1, 0]\n - handKeypoints[1 + j * 4 + i, 0]\n )\n y = (\n handKeypoints[1 + j * 4 + i + 1, 1]\n - handKeypoints[1 + j * 4 + i, 1]\n )\n lengthFingers[j] += np.sqrt(x ** 2 + y ** 2)\n normMax = max(lengthFingers)\n\n handCenterX = handKeypoints.T[0].sum() / handKeypoints.shape[0]\n handCenterY = handKeypoints.T[1].sum() / handKeypoints.shape[0]\n\n outputArray = np.array(\n [\n (handKeypoints.T[0] - handCenterX) / normMax,\n -(handKeypoints.T[1] - handCenterY) / normMax,\n (handKeypoints.T[2]),\n ]\n )\n\n openhand_format = []\n for i in range(outputArray.shape[1]):\n openhand_format.append(outputArray[0, i]) # add x\n openhand_format.append(outputArray[1, i]) # add y\n openhand_format = np.array(openhand_format)\n\n return openhand_format, handAccuaracyScore\n\n\ndef getFPS(video):\n (major_ver, minor_ver, subminor_ver) = (cv2.__version__).split(\".\")\n if int(major_ver) < 3:\n fps = video.get(cv2.cv.CV_CAP_PROP_FPS)\n else:\n fps = video.get(cv2.CAP_PROP_FPS)\n return fps\n\n\ndef getFrameNumber(video) -> int:\n (major_ver, minor_ver, subminor_ver) = (cv2.__version__).split(\".\")\n if int(major_ver) < 3:\n frame = video.get(cv2.cv.CAP_PROP_FRAME_COUNT)\n else:\n frame = video.get(cv2.CAP_PROP_FRAME_COUNT)\n return int(frame)\n\n\ndef getHeight(video) -> int:\n return int(video.get(4))\n\n\ndef getWidth(video) -> int:\n return int(video.get(3))\n\n\ndef create_plot(classifier_labels, prediction_probabilities, save_url):\n assert len(classifier_labels) == len(prediction_probabilities)\n fig, ax = plt.subplots(figsize=(4, 10))\n fig.subplots_adjust(left=0.1, right=0.9, top=0.96, bottom=0.04)\n plt.box(on=None)\n plt.tick_params(axis=\"x\", which=\"both\", bottom=False, top=False, labelbottom=False)\n plt.tick_params(\n axis=\"y\", direction=\"in\", pad=-50, which=\"both\", left=False, labelleft=True\n )\n ax.set_yticks(np.arange(len(prediction_probabilities)))\n ax.set_yticklabels(classifier_labels, ha=\"left\")\n ax.barh(\n np.arange(len(prediction_probabilities)),\n prediction_probabilities,\n color=\"#9500ff\",\n )\n fig.savefig(save_url, transparent=True, dpi=108, pad_inches=0.0)\n plt.close(fig)\n\n\ndef run():\n current_path = Path.cwd()\n\n # Load Keras model\n classifier_name = \"24Output-2x128-17epochs\"\n classifier_path = current_path / \"Models\" / classifier_name\n right_hand_classifier = tf.keras.models.load_model(\n classifier_path / (classifier_name + \"_right.h5\")\n )\n left_hand_classifier = tf.keras.models.load_model(\n classifier_path / (classifier_name + \"_left.h5\")\n )\n hand_classifiers = (left_hand_classifier, right_hand_classifier)\n\n if os.path.isfile(classifier_path / \"class.txt\"):\n with open(classifier_path / \"class.txt\", \"r\") as file:\n first_line = file.readline()\n classifier_labels = first_line.split(\",\")\n for i in range(len(classifier_labels)):\n classifier_labels[i] = classifier_labels[i].replace(\"_\", \" \")\n\n # Open video\n video_path = current_path / \"video\" / \"hand_gesture_cc.mp4\"\n video_in = cv2.VideoCapture(str(video_path))\n video_nbr_frame = getFrameNumber(video_in)\n\n # Create output video\n outputs_name = \"output\"\n video_out_path = current_path / \"video\" / \"output\" / (outputs_name + \".avi\")\n barchart_out_path = current_path / \"video\" / \"output\" / (outputs_name + \"_barchart\")\n barchart_out_path.mkdir(exist_ok=True)\n fourcc = cv2.VideoWriter_fourcc(*\"XVID\")\n video_out = cv2.VideoWriter(\n str(video_out_path),\n fourcc,\n getFPS(video_in),\n (getWidth(video_in), getHeight(video_in)),\n )\n\n # Load OpenPose\n params = dict()\n params[\"model_folder\"] = str(modelsPATH)\n params[\"face\"] = True\n params[\"hand\"] = True\n params[\"disable_multi_thread\"] = False\n netRes = 22 # Default 22\n params[\"net_resolution\"] = \"-1x\" + str(16 * netRes)\n\n opWrapper = op.WrapperPython()\n datum = op.Datum()\n opWrapper.configure(params)\n opWrapper.start()\n\n # Analyse video\n print(\"\\n\\nPress 'q' to stop analysis\")\n for frame_index in range(video_nbr_frame):\n if not video_in.isOpened():\n break\n else:\n hand_id = 1\n\n # Get frame\n ret, frame = video_in.read()\n\n # OpenPose analysis\n if type(frame) != type(None):\n datum.cvInputData = frame\n opWrapper.emplaceAndPop([datum])\n frame = datum.cvOutputData\n else:\n break\n wrists_positions = [(0, 0), (0, 0)]\n if datum.poseKeypoints.ndim > 1:\n body_keypoints = np.array(datum.poseKeypoints[0])\n wrists_positions = [\n (body_keypoints[7][0], body_keypoints[7][1]),\n (body_keypoints[4][0], body_keypoints[4][1]),\n ]\n hand_keypoints = np.array(datum.handKeypoints)\n hand_data, _ = format_data(hand_keypoints, hand_id)\n\n # OpenHand analysis\n prediction_label = \"\"\n prediction_probabilities = np.zeros(len(classifier_labels))\n if type(hand_data) != type(None):\n prediction_probabilities = hand_classifiers[hand_id].predict(\n np.array([hand_data])\n )[0]\n prediction_label = classifier_labels[\n np.argmax(prediction_probabilities)\n ]\n prediction_label = prediction_label.replace(\"_\", \" \")\n\n # Overlay result on video\n font = cv2.FONT_HERSHEY_SIMPLEX\n scale = 2\n thickness = 2\n color = (255, 0, 149)\n (label_width, label_height), baseline = cv2.getTextSize(\n prediction_label, font, scale, thickness\n )\n txt_position = tuple(\n map(\n lambda i, j: int(i - j),\n wrists_positions[hand_id],\n (label_width + 80, 70),\n )\n )\n cv2.putText(\n frame,\n prediction_label,\n txt_position,\n font,\n scale,\n color,\n thickness,\n lineType=cv2.LINE_AA,\n )\n\n # Display image\n cv2.imshow(\"frame\", frame)\n\n # Write image\n video_out.write(frame)\n\n # Create probabilities barchart\n create_plot(\n classifier_labels[:-1],\n prediction_probabilities[:-1],\n barchart_out_path / \"{}.png\".format(frame_index),\n )\n\n # Control\n if cv2.waitKey(1) & 0xFF == ord(\"q\"):\n break\n\n video_in.release()\n video_out.release()\n cv2.destroyAllWindows()\n\n\nif __name__ == \"__main__\" and OPENPOSE_LOADED:\n run()\n"
},
{
"alpha_fraction": 0.5955173373222351,
"alphanum_fraction": 0.606519341468811,
"avg_line_length": 34.08134078979492,
"blob_id": "7b79d036d597a10e88c039fdd65a71a8693ed329",
"content_id": "8c7f68411a94b8b4d4ae589f773c64d2131d48f6",
"detected_licenses": [
"MIT"
],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 21996,
"license_type": "permissive",
"max_line_length": 141,
"num_lines": 627,
"path": "/pose_classification_kit/src/dataset_controller.py",
"repo_name": "sokoloveav/pose-classification-kit",
"src_encoding": "UTF-8",
"text": "import os\nimport numpy as np\nfrom datetime import date\nimport numpy as np\nfrom pathlib import Path\nimport json\n\nfrom .imports.qt import QtWidgets, QtCore, QtGui, pyqtSignal, pyqtSlot\nfrom .imports.openpose import OPENPOSE_LOADED\n\nif OPENPOSE_LOADED:\n from .imports.openpose import op\nfrom ..config import DATASETS_PATH\nfrom ..datasets.body_models import BODY25\n\n\nclass ScrollLabel(QtWidgets.QScrollArea):\n def __init__(self):\n super().__init__()\n\n self.setWidgetResizable(True)\n content = QtWidgets.QWidget(self)\n self.setWidget(content)\n lay = QtWidgets.QVBoxLayout(content)\n self.label = QtWidgets.QLabel(content)\n self.label.setAlignment(QtCore.Qt.AlignLeft | QtCore.Qt.AlignTop)\n lay.addWidget(self.label)\n\n def setText(self, text):\n self.label.setText(text)\n\n\nclass DatasetControllerWidget(QtWidgets.QWidget):\n realTimeHandDraw_Signal = pyqtSignal(bool)\n stylesheet = \"\"\"\n #Dataset_Controller {\n background-color: white;\n border-radius: 3px;\n font-family: -apple-system,BlinkMacSystemFont,Segoe UI,Roboto,Oxygen,Ubuntu,Cantarell,Fira Sans,Droid Sans,Helvetica Neue,sans-serif;\n }\n #Dataset_Controller:disabled {\n background-color: #e8e8e8;\n }\n\n QPushButton {\n border: 1px solid #cbcbcb;\n border-radius: 2px;\n font-size: 16px;\n background: white;\n }\n QPushButton:hover {\n border-color: rgb(139, 173, 228);\n }\n QPushButton:pressed {\n color: #cbcbcb;\n }\n QPushButton:disabled {\n background: #e8e8e8;\n }\n\n QToolButton {\n border: 1px solid #cbcbcb;\n border-radius: 2px;\n font-size: 16px;\n background: white;\n }\n QToolButton:hover {\n border-color: rgb(139, 173, 228);\n }\n QToolButton:disabled {\n background: #e8e8e8;\n }\n\n #Record_Button {\n border: 1px solid #cbcbcb;\n border-radius: 2px;\n font-size: 16px;\n background: #ffb3b3;\n }\n #Record_Button:checked {\n background: #b3ffb3;\n }\n #Record_Button:disabled {\n background: #e8e8e8;\n }\n #Record_Button:hover {\n border-color: rgb(139, 173, 228);\n }\n\n QComboBox {\n border: 1px solid #cbcbcb;\n border-radius: 2px;\n font-size: 16px;\n background: white;\n }\n QComboBox:disabled {\n background: #e8e8e8;\n }\n\n QLabel {\n font-size: 16px;\n font-family: -apple-system,BlinkMacSystemFont,Segoe UI,Roboto,Oxygen,Ubuntu,Cantarell,Fira Sans,Droid Sans,Helvetica Neue,sans-serif;\n }\n\n QLineEdit {\n font-size: 16px;\n font-family: -apple-system,BlinkMacSystemFont,Segoe UI,Roboto,Oxygen,Ubuntu,Cantarell,Fira Sans,Droid Sans,Helvetica Neue,sans-serif;\n }\n\n QCheckBox {\n font-size: 16px;\n font-family: -apple-system,BlinkMacSystemFont,Segoe UI,Roboto,Oxygen,Ubuntu,Cantarell,Fira Sans,Droid Sans,Helvetica Neue,sans-serif;\n }\n QCheckBox:disabled {\n background: #e8e8e8;\n }\n \"\"\"\n\n def __init__(self, parent):\n super().__init__(parent=parent)\n self.parent = parent\n self.currentFilePath = \"\"\n self.currentFileInfos = \"\"\n self.poseName = \"\"\n self.focusID = 1\n self.sizeData = 0\n self.tresholdValue = 0.0\n self.datasetList = []\n self.accuracyList = []\n self.currentDataIndex = 0\n self.datasetSaved = True\n\n ## Widget style\n self.setObjectName(\"Dataset_Controller\")\n self.setEnabled(False)\n self.setAttribute(QtCore.Qt.WA_StyledBackground, True)\n self.setStyleSheet(self.stylesheet)\n\n effect = QtWidgets.QGraphicsDropShadowEffect(self)\n effect.setBlurRadius(10)\n effect.setOffset(0, 0)\n effect.setColor(QtCore.Qt.gray)\n self.setGraphicsEffect(effect)\n\n ## Widgets initialisation\n self.layout = QtWidgets.QGridLayout(self)\n self.setLayout(self.layout)\n\n self.fileLabel = ScrollLabel()\n self.fileLabel.setText(\"No file selected\")\n self.fileLabel.setMinimumHeight(90)\n self.fileLabel.setMaximumHeight(90)\n self.fileLabel.setMinimumWidth(180)\n self.layout.addWidget(self.fileLabel, 0, 0, 1, 9, QtCore.Qt.AlignTop)\n\n self.visuCheckbox = QtWidgets.QCheckBox(\"Visualize dataset\")\n self.layout.addWidget(self.visuCheckbox, 1, 0)\n self.visuCheckbox.toggled.connect(self.visuCheckboxToggled)\n self.visuCheckbox.setEnabled(False)\n\n self.minusButton = QtWidgets.QToolButton(\n cursor=QtCore.Qt.PointingHandCursor, toolTip=\"Previous sample in dataset\"\n )\n self.minusButton.setArrowType(QtCore.Qt.LeftArrow)\n self.layout.addWidget(self.minusButton, 1, 1, 1, 1)\n self.minusButton.setEnabled(False)\n self.minusButton.clicked.connect(\n lambda: self.setCurrentDataIndex(self.currentDataIndex - 1)\n )\n QtWidgets.QShortcut(\n QtGui.QKeySequence(\"left\"),\n self,\n lambda: self.setCurrentDataIndex(self.currentDataIndex - 1),\n )\n\n self.currentIndexLine = QtWidgets.QLineEdit(str(self.currentDataIndex))\n self.currentIndexLine.setValidator(QtGui.QDoubleValidator())\n self.currentIndexLine.setMaximumWidth(40)\n self.currentIndexLine.setEnabled(False)\n self.layout.addWidget(self.currentIndexLine, 1, 2, 1, 1)\n self.currentIndexLine.textChanged.connect(self.userIndexInput)\n\n self.maxIndexLabel = QtWidgets.QLabel(r\"/0\")\n self.maxIndexLabel.setEnabled(False)\n self.layout.addWidget(self.maxIndexLabel, 1, 3, 1, 1)\n\n self.plusButton = QtWidgets.QToolButton(\n cursor=QtCore.Qt.PointingHandCursor, toolTip=\"Next sample in dataset\"\n )\n self.plusButton.setArrowType(QtCore.Qt.RightArrow)\n self.layout.addWidget(self.plusButton, 1, 4, 1, 1)\n self.plusButton.setEnabled(False)\n self.plusButton.clicked.connect(\n lambda: self.setCurrentDataIndex(self.currentDataIndex + 1)\n )\n QtWidgets.QShortcut(\n QtGui.QKeySequence(\"right\"),\n self,\n lambda: self.setCurrentDataIndex(self.currentDataIndex + 1),\n )\n\n self.deleteButton = QtWidgets.QPushButton(\n \"Delete sample\",\n cursor=QtCore.Qt.PointingHandCursor,\n toolTip=\"Remove sample from the dataset\",\n )\n self.deleteButton.setEnabled(False)\n self.layout.addWidget(self.deleteButton, 1, 5, 1, 1)\n self.deleteButton.clicked.connect(\n lambda: self.removeEntryDataset(self.currentDataIndex)\n )\n\n self.recordButton = QtWidgets.QPushButton(\n \"Record samples\",\n cursor=QtCore.Qt.PointingHandCursor,\n toolTip=\"Start and stop sample recording\",\n )\n self.recordButton.setObjectName(\"Record_Button\")\n self.recordButton.setCheckable(True)\n self.recordButton.setChecked(False)\n self.recordButton.setEnabled(False)\n self.recordButton.clicked.connect(self.startRecording)\n self.layout.addWidget(self.recordButton, 1, 7, 1, 1)\n\n horSpacer = QtWidgets.QSpacerItem(\n 0, 0, QtWidgets.QSizePolicy.Expanding, QtWidgets.QSizePolicy.Minimum\n )\n self.layout.addItem(horSpacer, 1, 6)\n\n verSpacer = QtWidgets.QSpacerItem(\n 0, 0, QtWidgets.QSizePolicy.Minimum, QtWidgets.QSizePolicy.Expanding\n )\n self.layout.addItem(verSpacer, 2, 0)\n\n def createDataset(self):\n dlg = CreateDatasetDialog(self)\n if dlg.exec_():\n self.clearDataset()\n self.updateFileInfo(\n dlg.getFilePath(),\n dlg.getFileInfos(),\n 0,\n dlg.getPoseName(),\n dlg.getFocusID(),\n dlg.getTresholdValue(),\n )\n self.setCurrentDataIndex(0)\n\n def addEntryDataset(self, keypoints, accuracy: float):\n \"\"\"Add keypoints and accuracy of a hand pose to the local dataset.\n\n Args:\n keypoints (np.ndarray((3,21),float)): Coordinates x, y and the accuracy score for each 21 key points.\n accuracy (float): Global accuracy of detection of the pose.\n \"\"\"\n self.datasetList.append(keypoints)\n self.accuracyList.append(accuracy)\n self.maxIndexLabel.setText(\"/\" + str(len(self.datasetList)))\n self.datasetSaved = False\n\n def removeEntryDataset(self, index: int):\n \"\"\"Remove keypoints and accuracy referenced by its index from the local dataset.\n\n Args:\n index (int): Index in list of the entry removed.\n \"\"\"\n self.datasetList = self.datasetList[:index] + self.datasetList[index + 1 :]\n self.accuracyList = self.accuracyList[:index] + self.accuracyList[index + 1 :]\n maxIndex = len(self.accuracyList)\n self.maxIndexLabel.setText(\"/\" + str(maxIndex))\n index = min(index, maxIndex - 1)\n self.setCurrentDataIndex(index)\n self.datasetSaved = False\n\n def clearDataset(self):\n self.datasetList = []\n self.accuracyList = []\n self.datasetSaved = True\n\n def userIndexInput(self, indexStr: str):\n if indexStr.isdigit():\n self.setCurrentDataIndex(int(indexStr) - 1)\n elif len(indexStr) == 0:\n pass\n else:\n self.currentIndexLine.setText(str(self.currentDataIndex + 1))\n\n def visuCheckboxToggled(self, state: bool):\n self.realTimeHandDraw_Signal.emit(not state)\n self.plusButton.setEnabled(state)\n self.minusButton.setEnabled(state)\n self.currentIndexLine.setEnabled(state)\n self.maxIndexLabel.setEnabled(state)\n self.deleteButton.setEnabled(state)\n self.setCurrentDataIndex(0)\n\n def loadFileJSON(self):\n options = QtWidgets.QFileDialog.Options()\n fileName, _ = QtWidgets.QFileDialog.getOpenFileName(\n self,\n \"Open dataset\",\n str(DATASETS_PATH),\n \"Text Files (*.json)\",\n options=options,\n )\n\n if fileName:\n self.clearDataset()\n\n with open(fileName) as f:\n data = json.load(f)\n\n for entry in data[\"data\"]:\n self.addEntryDataset(\n np.array([entry[\"x\"], entry[\"y\"], entry[\"a\"]]),\n float(entry[\"detection_accuracy\"]),\n )\n\n self.updateFileInfo(\n filePath=fileName,\n fileInfo={\"info\": data[\"info\"], \"data\": []},\n sizeData=data[\"info\"][\"nbr_entries\"],\n poseName=data[\"info\"][\"label\"],\n focusID=data[\"info\"][\"focus_id\"],\n tresholdValue=data[\"info\"][\"threshold_value\"],\n )\n\n self.recordButton.setEnabled(True)\n self.setEnabled(True)\n self.visuCheckbox.setChecked(True)\n self.datasetSaved = True\n return True\n return False\n\n def updateFileInfo(\n self,\n filePath: str = None,\n fileInfo: str = None,\n sizeData: int = None,\n poseName: str = None,\n focusID: int = None,\n tresholdValue: int = None,\n ):\n self.visuCheckbox.setEnabled(True)\n if filePath:\n self.currentFilePath = filePath\n if fileInfo:\n self.currentFileInfos = fileInfo\n if sizeData:\n self.sizeData = sizeData\n self.maxIndexLabel.setText(\"/\" + str(self.sizeData))\n if poseName:\n self.poseName = poseName\n if focusID != None:\n self.focusID = focusID\n if tresholdValue != None:\n self.tresholdValue = tresholdValue\n self.fileLabel.setText(\n str(self.currentFilePath)\n + \"\\n -> {} entries for {} ({} hand) with a minimum accuracy of {}.\".format(\n str(self.sizeData),\n self.poseName,\n [\"left_hand\", \"right_hand\", \"body\"][self.focusID],\n str(self.tresholdValue),\n )\n )\n # self.maxIndexLabel.setText(\"/\" + str(self.sizeData))\n self.recordButton.setEnabled(True)\n self.setEnabled(True)\n\n def setCurrentDataIndex(self, index: int):\n if len(self.datasetList) == 0:\n self.currentDataIndex = 0\n self.parent.handClassifier.leftHandAnalysis.drawHand(None, 0.0)\n self.parent.handClassifier.rightHandAnalysis.drawHand(None, 0.0)\n else:\n if index >= len(self.datasetList):\n index = 0\n if index < 0:\n index = len(self.datasetList) - 1\n self.currentDataIndex = index\n\n if self.focusID == 0:\n self.parent.handClassifier.leftHandAnalysis.drawHand(\n np.array(self.datasetList[self.currentDataIndex]),\n self.accuracyList[self.currentDataIndex],\n )\n elif self.focusID == 1:\n self.parent.handClassifier.rightHandAnalysis.drawHand(\n np.array(self.datasetList[self.currentDataIndex]),\n self.accuracyList[self.currentDataIndex],\n )\n elif self.focusID == 2:\n self.parent.bodyClassifier.bodyAnalysis.drawBody(\n np.array(self.datasetList[self.currentDataIndex]),\n self.accuracyList[self.currentDataIndex],\n )\n self.currentIndexLine.setText(str(self.currentDataIndex + 1))\n\n def writeDataToJSON(self):\n \"\"\" Save the current dataset to the JSON file (URL: self.currentFilePath).\"\"\"\n if os.path.isfile(self.currentFilePath):\n fileData = self.currentFileInfos\n fileData[\"info\"][\"nbr_entries\"] = len(self.datasetList)\n fileData[\"data\"] = []\n self.updateFileInfo(sizeData=len(self.datasetList))\n print(len(self.datasetList))\n for accuracy, data in zip(self.accuracyList, self.datasetList):\n fileData[\"data\"].append(\n {\n \"detection_accuracy\": float(accuracy),\n \"x\": data[0].tolist(),\n \"y\": data[1].tolist(),\n \"a\": data[2].tolist(),\n }\n )\n\n with open(self.currentFilePath, \"w\") as outfile:\n json.dump(fileData, outfile, indent=4)\n\n self.datasetSaved = True\n\n def startRecording(self, state: bool):\n self.parent.isRecording = state\n\n def getTresholdValue(self) -> float:\n return self.tresholdValue\n\n def getFocusID(self) -> int:\n return self.focusID\n\n def getPoseName(self) -> str:\n return self.poseName\n\n def isSaved(self) -> bool:\n return self.datasetSaved\n\n\nclass CreateDatasetDialog(QtWidgets.QDialog):\n def __init__(self, parent=None):\n super(CreateDatasetDialog, self).__init__(parent=parent)\n\n self.setWindowTitle(\"Create new dataset\")\n self.setWindowFlag(QtCore.Qt.WindowContextHelpButtonHint, False)\n\n self.currentFolder = DATASETS_PATH\n self.currentFilePath = None\n self.currentPoseName = \"Default\"\n self.currentTresholdValue = 0.0\n\n ## Widgets initialisation\n self.folderLabel = ScrollLabel()\n self.folderLabel.setText(str(self.currentFolder))\n self.folderLabel.setMaximumHeight(35)\n self.folderLabel.setMinimumWidth(200)\n\n self.folderButton = QtWidgets.QPushButton(\"Change root folder\")\n self.folderButton.clicked.connect(self.changeSavingFolder)\n\n self.handSelection = FocusSelectionWidget(self)\n\n self.poseNameLine = QtWidgets.QLineEdit(self.currentPoseName)\n self.poseNameLine.textChanged.connect(self.changePoseName)\n\n self.tresholdValueLine = QtWidgets.QLineEdit(str(self.currentTresholdValue))\n onlyDouble = QtGui.QDoubleValidator()\n self.tresholdValueLine.setValidator(onlyDouble)\n self.tresholdValueLine.textChanged.connect(self.changeTresholdValue)\n\n self.createButton = QtWidgets.QPushButton(\"Create dataset\")\n self.createButton.clicked.connect(self.createDataset)\n\n ## Structure\n self.layout = QtWidgets.QGridLayout(self)\n self.setLayout(self.layout)\n self.layout.addWidget(self.folderLabel, 0, 0, 1, 5, QtCore.Qt.AlignTop)\n self.layout.addWidget(self.folderButton, 0, 5, 1, 1, QtCore.Qt.AlignTop)\n self.layout.addWidget(self.handSelection, 1, 0, 1, 1)\n self.layout.addWidget(self.poseNameLine, 1, 2, 1, 1)\n self.layout.addWidget(QtWidgets.QLabel(\"Label:\"), 1, 1, 1, 1)\n self.layout.addWidget(QtWidgets.QLabel(\"Accuracy threshold:\"), 1, 3, 1, 1)\n self.layout.addWidget(self.tresholdValueLine, 1, 4, 1, 1)\n self.layout.addWidget(self.createButton, 1, 5, 1, 1)\n self.layout.setRowStretch(0, 0)\n self.layout.setRowStretch(1, 0)\n self.layout.setRowStretch(2, 1)\n\n def createDataset(self):\n self.isRecording = True\n path = self.getSavingFolder()\n focusID = self.handSelection.getCurrentFocusID()\n fileName = (\n self.getPoseName()\n + \"_\"\n + [\"left_hand\", \"right_hand\", \"body\"][focusID]\n + \".json\"\n )\n path /= fileName\n if path.is_file():\n self.isRecording = False\n self.createButton.setEnabled(False)\n self.createButton.setText(\"Dataset allready created\")\n\n else:\n self.createButton.setEnabled(True)\n self.createButton.setText(\"Create dataset\")\n with open(path, \"w+\") as outfile:\n json.dump(self.getFileInfos(), outfile, indent=4, ensure_ascii=False)\n self.accept()\n self.currentFilePath = path\n\n def getFileHeadlines(self):\n folder = self.getPoseName()\n tresholdValue = self.getTresholdValue()\n handID = self.handSelection.getCurrentFocusID()\n\n output = \"\"\n output += folder + \",\" + str(handID) + \",\" + str(tresholdValue) + \"\\n\"\n output += \"## Data generated the \" + str(date.today()) + \" labelled \" + folder\n output += \" (\" + (\"right hand\" if handID == 1 else \"left hand\")\n output += \") with a global accuracy higher than \" + str(tresholdValue)\n output += \", based on OpenPose estimation.\\n\"\n\n output += \"## Data format: Coordinates x, y and accuracy of estimation a\\n\\n\"\n\n return output\n\n def getFileInfos(self):\n info = {\n \"info\": {\n \"label\": self.getPoseName(),\n \"focus\": [\"left_hand\", \"right_hand\", \"body\"][\n self.handSelection.getCurrentFocusID()\n ],\n \"nbr_entries\": 0,\n \"threshold_value\": self.getTresholdValue(),\n \"focus_id\": self.handSelection.getCurrentFocusID(),\n },\n \"data\": [],\n }\n if self.handSelection.getCurrentFocusID() == 2 and OPENPOSE_LOADED:\n info[\"info\"][\"BODY25_Mapping\"] = BODY25.mapping\n info[\"info\"][\"BODY25_Pairs\"] = BODY25.pairs\n return info\n\n @pyqtSlot()\n def changeSavingFolder(self):\n path_str = str(\n QtWidgets.QFileDialog.getExistingDirectory(self, \"Select Directory\")\n )\n if len(path_str) > 0:\n self.folderLabel.setText(path_str)\n self.currentFolder = Path(path_str)\n\n @pyqtSlot(str)\n def changePoseName(self, name: str):\n if not self.createButton.isEnabled():\n self.createButton.setEnabled(True)\n self.createButton.setText(\"Create dataset\")\n self.currentPoseName = name\n\n @pyqtSlot(str)\n def changeTresholdValue(self, value: str):\n try:\n self.currentTresholdValue = float(value.replace(\",\", \".\"))\n except:\n self.currentTresholdValue = 0.0\n\n def getSavingFolder(self):\n return self.currentFolder\n\n def getPoseName(self) -> str:\n return self.currentPoseName\n\n def getTresholdValue(self) -> float:\n return self.currentTresholdValue\n\n def getFocusID(self) -> int:\n return self.handSelection.getCurrentFocusID()\n\n def getFilePath(self):\n return self.currentFilePath\n\n def resizeEvent(self, event):\n self.folderButton.setFixedHeight(self.folderLabel.height())\n\n\nclass FocusSelectionWidget(QtWidgets.QWidget):\n def __init__(self, parent=None):\n super(FocusSelectionWidget, self).__init__(parent)\n self.layout = QtWidgets.QGridLayout(self)\n self.setLayout(self.layout)\n self.parent = parent\n\n self.layout.addWidget(QtWidgets.QLabel(\"Focus:\"), 0, 0)\n self.rightCheckbox = QtWidgets.QCheckBox(\"Right hand\")\n self.leftCheckbox = QtWidgets.QCheckBox(\"Left hand\")\n self.bodyCheckbox = QtWidgets.QCheckBox(\"Body\")\n self.layout.addWidget(self.leftCheckbox, 0, 1)\n self.layout.addWidget(self.rightCheckbox, 0, 2)\n self.layout.addWidget(self.bodyCheckbox, 0, 3)\n\n horSpacer = QtWidgets.QSpacerItem(\n 0, 0, QtWidgets.QSizePolicy.Expanding, QtWidgets.QSizePolicy.Minimum\n )\n self.layout.addItem(horSpacer, 0, 3)\n\n group = QtWidgets.QButtonGroup(self)\n group.addButton(self.rightCheckbox)\n group.addButton(self.leftCheckbox)\n group.addButton(self.bodyCheckbox)\n group.buttonClicked.connect(self.toggleFocus)\n\n self.bodyCheckbox.setChecked(True)\n self.focusID = 2\n\n def toggleFocus(self, btn):\n label = btn.text()\n if label == \"Left hand\":\n self.focusID = 0\n elif label == \"Right hand\":\n self.focusID = 1\n elif label == \"Body\":\n self.focusID = 2\n\n def getCurrentFocusID(self):\n return self.focusID\n"
},
{
"alpha_fraction": 0.625286877155304,
"alphanum_fraction": 0.6276909708976746,
"avg_line_length": 38.10683822631836,
"blob_id": "6ad36e2dd53abc8e5ef5e2c99d6e8a56870e532d",
"content_id": "adafd5d8dd9aa638fbf54934d0cf5921c0dc5a80",
"detected_licenses": [
"MIT"
],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 9151,
"license_type": "permissive",
"max_line_length": 118,
"num_lines": 234,
"path": "/pose_classification_kit/app.py",
"repo_name": "sokoloveav/pose-classification-kit",
"src_encoding": "UTF-8",
"text": "from .src.imports.qt import QtWidgets, QtGui, QtCore\nfrom .src.imports.tensorflow import TF_STATUS_STR, TF_LOADED\n\nfrom .src.video_analysis.video_manager import CameraInput, VideoViewerWidget\nfrom .src.video_analysis.openpose_thread import VideoAnalysisThread, OPENPOSE_LOADED\nfrom .src.keypoints_analysis.hand_analysis import HandClassifierWidget\nfrom .src.keypoints_analysis.body_analysis import BodyClassifierWidget\nfrom .src.dataset_controller import DatasetControllerWidget\n\n# If the imports above are not resolved by your Python support system (pylance by default on VSC),\n# add ./pose-classification-kit as extra path (see \"python.analysis.extraPaths\" in .\\.vscode\\settings.json by default)\n\nimport sys\nfrom .config import OPENPOSE_PATH\n\n\nclass MainWindow(QtWidgets.QMainWindow):\n def __init__(self, parent=None):\n ## Init\n super(MainWindow, self).__init__(parent)\n self.setWindowTitle(\"Pose Classification App\")\n\n ## Parameters\n self.isRecording = False\n self.realTimeHandDraw = True\n self.parent = parent\n\n ## Widgets\n self.cameraInput = CameraInput()\n\n self.videoViewer = VideoViewerWidget(self.cameraInput.getAvailableCam())\n self.videoViewer.camera_selector.currentIndexChanged.connect(\n self.cameraInput.select_camera\n )\n self.videoViewer.refreshButton.clicked.connect(self.refreshCameraList)\n\n self.datasetController = DatasetControllerWidget(self)\n self.datasetController.realTimeHandDraw_Signal.connect(\n self.changeHandDrawingState\n )\n\n self.AnalysisThread = VideoAnalysisThread(self.cameraInput)\n self.AnalysisThread.newFrame.connect(self.videoViewer.setFrame)\n self.AnalysisThread.newFrame.connect(self.analyseNewImage)\n self.AnalysisThread.start()\n self.AnalysisThread.setState(True)\n\n self.handClassifier = HandClassifierWidget()\n self.bodyClassifier = BodyClassifierWidget()\n\n ## Structure\n self.windowSplitter = QtWidgets.QSplitter(QtCore.Qt.Horizontal)\n self.setCentralWidget(self.windowSplitter)\n self.windowSplitter.setChildrenCollapsible(False)\n\n leftWidget = QtWidgets.QWidget()\n leftLayout = QtWidgets.QVBoxLayout(leftWidget)\n leftLayout.addWidget(self.videoViewer)\n leftLayout.addWidget(self.datasetController)\n leftLayout.addItem(\n QtWidgets.QSpacerItem(\n 5, 10, QtWidgets.QSizePolicy.Minimum, QtWidgets.QSizePolicy.Expanding\n )\n )\n leftLayout.setStretch(0, 0)\n leftLayout.setStretch(1, 0)\n leftLayout.setStretch(2, 1)\n\n rightWidget = QtWidgets.QWidget()\n self.rightTabWidget = QtWidgets.QTabWidget()\n rightLayout = QtWidgets.QVBoxLayout(rightWidget)\n rightLayout.addWidget(self.rightTabWidget)\n\n self.rightTabWidget.addTab(self.bodyClassifier, \"Body\")\n self.rightTabWidget.addTab(self.handClassifier, \"Hands\")\n\n self.windowSplitter.addWidget(leftWidget)\n self.windowSplitter.addWidget(rightWidget)\n\n ## Menu\n bar = self.menuBar()\n fileAction = bar.addMenu(\"Dataset\")\n\n openAct = QtWidgets.QAction(\"&Open\", self)\n openAct.setShortcut(\"Ctrl+O\")\n openAct.setStatusTip(\"Open dataset\")\n openAct.triggered.connect(self.datasetController.loadFileJSON)\n fileAction.addAction(openAct)\n\n initAct = QtWidgets.QAction(\"&Create new ...\", self)\n initAct.setShortcut(\"Ctrl+N\")\n initAct.setStatusTip(\"Create dataset\")\n initAct.triggered.connect(self.datasetController.createDataset)\n fileAction.addAction(initAct)\n\n saveAct = QtWidgets.QAction(\"&Save\", self)\n saveAct.setShortcut(\"Ctrl+S\")\n saveAct.setStatusTip(\"Save dataset\")\n saveAct.triggered.connect(self.datasetController.writeDataToJSON)\n fileAction.addAction(saveAct)\n\n ## Status Bar\n self.statusBar = QtWidgets.QStatusBar()\n self.setStatusBar(self.statusBar)\n\n self.openpose_path = \"OpenPose installation path: \" + str(OPENPOSE_PATH)\n self.openpose_path_label = QtWidgets.QLabel(\n self.openpose_path,\n toolTip=\"If incorrect, change in ./openhand_classifier/src/__init__.py\",\n )\n self.statusBar.addWidget(self.openpose_path_label)\n\n self.openposeStatus = (\n \"OpenPose running.\" if OPENPOSE_LOADED else \"OpenPose not found\"\n )\n self.openposeStatusLabel = QtWidgets.QLabel(\n '<span style=\"color:'\n + (\"green\" if OPENPOSE_LOADED else \"red\")\n + '\">'\n + self.openposeStatus\n + \"</span>\"\n )\n self.statusBar.addWidget(self.openposeStatusLabel)\n\n self.tfStatusLabel = QtWidgets.QLabel(\n '<span style=\"color:'\n + (\"green\" if TF_LOADED else \"red\")\n + '\">'\n + TF_STATUS_STR\n + \"</span>\"\n )\n self.statusBar.addWidget(self.tfStatusLabel)\n\n def closeEvent(self, event):\n print(\"Closing\")\n exitBool = True\n if self.datasetController.isSaved():\n exitBool = True\n else:\n reply = QtWidgets.QMessageBox.question(\n self,\n \"Hand pose classifier\",\n \"Do you want to save \"\n + self.datasetController.getPoseName()\n + \" dataset?\",\n buttons=QtWidgets.QMessageBox.StandardButtons(\n QtWidgets.QMessageBox.Yes\n | QtWidgets.QMessageBox.No\n | QtWidgets.QMessageBox.Cancel\n ),\n )\n\n if reply == QtWidgets.QMessageBox.Cancel:\n exitBool = False\n elif reply == QtWidgets.QMessageBox.No:\n exitBool = True\n elif reply == QtWidgets.QMessageBox.Yes:\n self.datasetController.writeDataToJSON()\n exitBool = True\n\n if exitBool:\n event.accept()\n self.AnalysisThread.terminate()\n else:\n event.ignore()\n\n def keyPressEvent(self, event):\n if event.key() == 16777223 and self.datasetController.deleteButton.isEnabled():\n self.datasetController.removeEntryDataset(\n self.datasetController.currentDataIndex\n )\n\n def refreshCameraList(self):\n camList = self.cameraInput.refreshCameraList()\n if not camList:\n print(\"No camera\")\n else:\n self.videoViewer.camera_selector.clear()\n self.videoViewer.camera_selector.addItems(\n [c.description() for c in camList]\n )\n\n def analyseNewImage(self, image): # Call each time AnalysisThread emit a new pix\n self.videoViewer.setInfoText(self.AnalysisThread.getInfoText())\n\n bodyKeypoints, bodyAccuracy = self.AnalysisThread.getBodyData()\n leftHandKeypoints, leftAccuracy = self.AnalysisThread.getHandData(0)\n rightHandKeypoints, rightAccuracy = self.AnalysisThread.getHandData(1)\n\n # Draw hand or body on GUI\n if self.realTimeHandDraw:\n if self.rightTabWidget.currentWidget() == self.handClassifier:\n self.handClassifier.leftHandAnalysis.drawHand(\n leftHandKeypoints, leftAccuracy\n )\n self.handClassifier.rightHandAnalysis.drawHand(\n rightHandKeypoints, rightAccuracy\n )\n elif self.rightTabWidget.currentWidget() == self.bodyClassifier:\n self.bodyClassifier.bodyAnalysis.drawBody(bodyKeypoints, bodyAccuracy)\n # Recording left hand\n if self.datasetController.getFocusID() == 0:\n if type(leftHandKeypoints) != type(None):\n if self.isRecording:\n if leftAccuracy > self.datasetController.getTresholdValue():\n self.datasetController.addEntryDataset(\n leftHandKeypoints, leftAccuracy\n )\n # Recording right hand\n elif self.datasetController.getFocusID() == 1:\n if type(rightHandKeypoints) != type(None): # If selected hand detected\n if self.isRecording:\n if rightAccuracy > self.datasetController.getTresholdValue():\n self.datasetController.addEntryDataset(\n rightHandKeypoints, rightAccuracy\n )\n # Recording body\n elif self.datasetController.getFocusID() == 2:\n if type(bodyKeypoints) != type(None):\n if self.isRecording:\n if bodyAccuracy > self.datasetController.getTresholdValue():\n self.datasetController.addEntryDataset(\n bodyKeypoints, bodyAccuracy\n )\n\n def changeHandDrawingState(self, state: bool):\n self.realTimeHandDraw = state\n\n\ndef run():\n app = QtWidgets.QApplication(sys.argv)\n mainWindow = MainWindow()\n mainWindow.show()\n sys.exit(app.exec_())\n"
},
{
"alpha_fraction": 0.7058823704719543,
"alphanum_fraction": 0.7058823704719543,
"avg_line_length": 34.70000076293945,
"blob_id": "047f854cd8bc9f1bf8565be5254937448877bef8",
"content_id": "794968f1d36bfbab95f8a78c3d6487365c36a058",
"detected_licenses": [
"MIT"
],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 357,
"license_type": "permissive",
"max_line_length": 84,
"num_lines": 10,
"path": "/pose_classification_kit/config.py",
"repo_name": "sokoloveav/pose-classification-kit",
"src_encoding": "UTF-8",
"text": "import pathlib\n\n# Path to OpenPose installation folder on your system.\nOPENPOSE_PATH = pathlib.Path(\"C:/\") / \"Program files\" / \"OpenPose\"\n\n# Path to model folder.\nMODELS_PATH = pathlib.Path(\".\").resolve() / \"pose_classification_kit\" / \"models\"\n\n# Path to datasets folder.\nDATASETS_PATH = pathlib.Path(\".\").resolve() / \"pose_classification_kit\" / \"datasets\"\n"
}
] | 20 |
amulmgr/Machine-Learning-Program
|
https://github.com/amulmgr/Machine-Learning-Program
|
6b43b73f3866baf2967f297cfb5ff0d8f609968c
|
98955855ce51eea5aeba1227a846437d62c0f310
|
12e9939db3658a16b370877664b5c827a7df197d
|
refs/heads/master
| 2020-05-31T06:52:59.147193 | 2019-06-04T07:45:26 | 2019-06-04T07:45:26 | 190,153,623 | 0 | 0 | null | null | null | null | null |
[
{
"alpha_fraction": 0.6666666865348816,
"alphanum_fraction": 0.6666666865348816,
"avg_line_length": 43.5,
"blob_id": "f5246c951f3afcbb78c4367c54f38208e0cba1b5",
"content_id": "80e19037fbb8d57b107dce76784581ef322a5fdd",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 279,
"license_type": "no_license",
"max_line_length": 68,
"num_lines": 6,
"path": "/Using Microphone_Speech rec.py",
"repo_name": "amulmgr/Machine-Learning-Program",
"src_encoding": "UTF-8",
"text": "\r\n import speech_recognition as sr\r\n>>> from guessing_game.py import recognize_speech_from_mic\r\n>>> r = sr.Recognizer()\r\n>>> m = sr.Microphone()\r\n>>> recognize_speech_from_mic(r, m) # speak after running this line\r\n{'success': True, 'error': None, 'transcription': 'hello'}\r\n"
}
] | 1 |
andreiburuntia/centenar
|
https://github.com/andreiburuntia/centenar
|
36edd39c8c350c7443529f705f1a1487cd57f681
|
d1816513fa664c0d17561f636d6f8d211927d007
|
53e52bc7582be350fe2c009678f843a56d6dac7e
|
refs/heads/master
| 2020-03-28T12:51:50.064040 | 2018-09-11T15:57:59 | 2018-09-11T15:57:59 | 148,341,571 | 1 | 0 | null | null | null | null | null |
[
{
"alpha_fraction": 0.8095238208770752,
"alphanum_fraction": 0.8095238208770752,
"avg_line_length": 4.25,
"blob_id": "1a93e98db63db3872bf6527f3572551675260bf2",
"content_id": "f4fe4bb13c16edef60969c823f9d6572cad611e0",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 21,
"license_type": "no_license",
"max_line_length": 6,
"num_lines": 4,
"path": "/cen.py",
"repo_name": "andreiburuntia/centenar",
"src_encoding": "UTF-8",
"text": "some\nlines\nof\npython\n"
},
{
"alpha_fraction": 0.8095238208770752,
"alphanum_fraction": 0.8095238208770752,
"avg_line_length": 4.25,
"blob_id": "c17a58be58bc8867d8735893065d213e19d2ff40",
"content_id": "42a762ed4794c5f82a72e9d7a3cb65fdeb325db6",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "JavaScript",
"length_bytes": 21,
"license_type": "no_license",
"max_line_length": 6,
"num_lines": 4,
"path": "/znar.js",
"repo_name": "andreiburuntia/centenar",
"src_encoding": "UTF-8",
"text": "some\nlines\nof\njsssss\n"
}
] | 2 |
dmlond/greenhouse_mqtt_microcontroller
|
https://github.com/dmlond/greenhouse_mqtt_microcontroller
|
315d51c0b258825740c8b79b76fbbe7068c06cca
|
6e2051087d73228b757aff578fd7ef6b4f870bd4
|
e7eded45940c450ea893fce1de04dcf2b46dc218
|
refs/heads/main
| 2023-06-11T11:27:59.252033 | 2021-07-02T00:39:38 | 2021-07-02T00:39:38 | 365,613,547 | 6 | 2 | null | null | null | null | null |
[
{
"alpha_fraction": 0.704066276550293,
"alphanum_fraction": 0.7134789228439331,
"avg_line_length": 29.247058868408203,
"blob_id": "06bff5483f10baac3cf2e69afdcb70e2e68e18c1",
"content_id": "17a0d04c79b33cc4d35d5a7c40271e8378c0d396",
"detected_licenses": [
"MIT"
],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 2656,
"license_type": "permissive",
"max_line_length": 78,
"num_lines": 85,
"path": "/code.py",
"repo_name": "dmlond/greenhouse_mqtt_microcontroller",
"src_encoding": "UTF-8",
"text": "# https://learn.adafruit.com/mqtt-in-circuitpython/circuitpython-wifi-usage\r\n# https://learn.adafruit.com/mqtt-in-circuitpython/connecting-to-a-mqtt-broker\r\n# required from adafruit_bundle:\r\n# - adafruit_requests\r\n# - adafruit_minimqtt\r\n# - adafruit_bus_device\r\n# - adafruit_register\r\n# - adafruit_si7021\r\nimport time\r\nimport ssl\r\nimport socketpool\r\nimport wifi\r\nimport adafruit_minimqtt.adafruit_minimqtt as MQTT\r\nimport board\r\nfrom digitalio import DigitalInOut, Direction, Pull\r\nfrom analogio import AnalogIn\r\nimport adafruit_si7021\r\n\r\n# Add a secrets.py to your filesystem that has a dictionary like\r\n# that in secrets.py.example\r\n# pylint: disable=no-name-in-module,wrong-import-order\r\ntry:\r\n from secrets import secrets\r\nexcept ImportError:\r\n print(\"WiFi secrets are kept in secrets.py, please add them there!\")\r\n raise\r\n\r\nprint(\"Connecting to %s\" % secrets[\"ssid\"])\r\nwifi.radio.connect(secrets[\"ssid\"], secrets[\"password\"])\r\nprint(\"Connected to %s!\" % secrets[\"ssid\"])\r\n### Feeds ###\r\nlight_feed = \"greenhouse/light\"\r\ntemp_feed = \"greenhouse/temperature\"\r\nhumidity_feed = \"greenhouse/humidity\"\r\n\r\n# Define callback methods which are called when events occur\r\n# pylint: disable=unused-argument, redefined-outer-name\r\ndef connected(client, userdata, flags, rc):\r\n # This function will be called when the client is connected\r\n # successfully to the broker.\r\n print(\"Connected to MQTT!\")\r\n\r\ndef disconnected(client, userdata, rc):\r\n # This method is called when the client is disconnected\r\n print(\"Disconnected from MQTT!\")\r\n\r\n# Create a socket pool\r\npool = socketpool.SocketPool(wifi.radio)\r\n\r\n# Set up a MiniMQTT Client\r\nmqtt_client = MQTT.MQTT(\r\n broker=secrets[\"broker\"],\r\n port=secrets[\"port\"],\r\n username=secrets[\"aio_username\"],\r\n password=secrets[\"aio_key\"],\r\n socket_pool=pool,\r\n ssl_context=ssl.create_default_context(),\r\n)\r\n\r\n# Setup the callback methods above\r\nmqtt_client.on_connect = connected\r\nmqtt_client.on_disconnect = disconnected\r\n\r\n# Connect the client to the MQTT broker.\r\nprint(\"Connecting to MQTT...\")\r\nmqtt_client.connect()\r\n\r\n# Create library object using our Bus I2C port\r\nsensor = adafruit_si7021.SI7021(board.I2C())\r\nlight_pin = AnalogIn(board.IO4)\r\n\r\nwhile True:\r\n # Poll the message queue\r\n mqtt_client.loop()\r\n\r\n # get the current temperature\r\n light_val = light_pin.value\r\n temp_val = ((sensor.temperature * 9)/5) + 32\r\n humidity_val = sensor.relative_humidity\r\n\r\n # Send a new messages\r\n mqtt_client.publish(light_feed, light_val)\r\n mqtt_client.publish(temp_feed, temp_val)\r\n mqtt_client.publish(humidity_feed, humidity_val)\r\n time.sleep(0.5)\r\n"
},
{
"alpha_fraction": 0.8096724152565002,
"alphanum_fraction": 0.8096724152565002,
"avg_line_length": 52.5,
"blob_id": "27d1114b160b0bd006502f1d805bd9e202b2fd1a",
"content_id": "23c96b5e9add19ce5076b0742ca0a119e449a547",
"detected_licenses": [
"MIT"
],
"is_generated": false,
"is_vendor": false,
"language": "Markdown",
"length_bytes": 641,
"license_type": "permissive",
"max_line_length": 135,
"num_lines": 12,
"path": "/README.md",
"repo_name": "dmlond/greenhouse_mqtt_microcontroller",
"src_encoding": "UTF-8",
"text": "# greenhouse_mqtt_microcontroller\nUsing [CircuitPython](https://circuitpython.org) and MQTT to publish metrics about a Greenhouse.\n\nYou will need the latest version of the [Adafruit CircuitPython Bundle](https://circuitpython.org/libraries) unzipped onto your\nworkstation. Inside this directory you will find all of the\nlibraries required to run this code.\n\nYou will need to copy secrets.py.example, and set the values in the\ndictionary to real values to connect to your wifi, and mqtt_broker.\n\ncode.py includes information on the adafruit libraries that must be installed, along with code.py and secrets.py, to your CIRCUITPYTHON\ndirectory."
}
] | 2 |
BabyRage21/Project_Dio
|
https://github.com/BabyRage21/Project_Dio
|
eb202d02495fb327cc29e9278e8ba4e86cd2de61
|
12c57cbb769c4938253918a8aa40defe0a26834c
|
921f8d58fc068ed2a326fb5591260f308d4c34af
|
refs/heads/master
| 2020-05-24T14:43:56.145181 | 2019-05-18T04:49:48 | 2019-05-18T04:49:48 | null | 0 | 0 | null | null | null | null | null |
[
{
"alpha_fraction": 0.5775076150894165,
"alphanum_fraction": 0.609422504901886,
"avg_line_length": 26.41666603088379,
"blob_id": "9d0cb42c268bf7b3651621163176914ff260faf3",
"content_id": "c69ed7d3d93e5c4045a0fd1b40b98cb0f7eb7a22",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 658,
"license_type": "no_license",
"max_line_length": 109,
"num_lines": 24,
"path": "/diogen/userApp/migrations/0004_auto_20190518_0943.py",
"repo_name": "BabyRage21/Project_Dio",
"src_encoding": "UTF-8",
"text": "# Generated by Django 2.2 on 2019-05-18 04:43\n\nfrom django.db import migrations, models\nimport django.db.models.deletion\n\n\nclass Migration(migrations.Migration):\n\n dependencies = [\n ('userApp', '0003_eventprofile'),\n ]\n\n operations = [\n migrations.AddField(\n model_name='eventprofile',\n name='place',\n field=models.CharField(blank=True, default='', max_length=100),\n ),\n migrations.AlterField(\n model_name='eventprofile',\n name='company',\n field=models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to='userApp.PersonProfile'),\n ),\n ]\n"
},
{
"alpha_fraction": 0.6333209276199341,
"alphanum_fraction": 0.6364819407463074,
"avg_line_length": 30.273256301879883,
"blob_id": "8e574dbbc8c902718ff8f6a8eea2be7fe0a7b16b",
"content_id": "426a2da23151e3d6eaca3ea1139d2d362cb214a8",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 5425,
"license_type": "no_license",
"max_line_length": 140,
"num_lines": 172,
"path": "/diogen/userApp/views.py",
"repo_name": "BabyRage21/Project_Dio",
"src_encoding": "UTF-8",
"text": "from django.shortcuts import render, get_object_or_404, render_to_response, redirect\nfrom .models import *\nfrom django.http import HttpResponseRedirect,HttpResponse\nfrom django.urls import reverse\nfrom django.views.generic.edit import CreateView, UpdateView, DeleteView\nimport datetime\nfrom django.utils.timezone import utc\n\nfrom django.contrib.auth.decorators import login_required\nfrom django.db import transaction\nfrom django.contrib.messages import constants as messages\nfrom django.contrib.auth.forms import UserCreationForm\nfrom userApp.forms import *\nfrom django.db.models import *\nfrom django.contrib.auth import authenticate, login\nfrom django.contrib.auth.models import User\nfrom django.views.generic import ListView\n\nimport operator\n\ndef mainpage(request):\n return redirect('login/')\n\ndef registration(request):\n if request.method == 'POST':\n form1=ProfileForm\n #userform=UserForm\n form = UserCreationForm(request.POST)\n if form.is_valid():\n form.save()\n #messages.success(request, 'Account created successfully')\n\n #TEMP: логин сразу после регистрации\n new_user = authenticate(username=form.cleaned_data['username'],password=form.cleaned_data['password1'],)\n login(request, new_user)\n\n return redirect('upd/')\n \n else:\n pass\n #TEMP\n \n return HttpResponse('nani!!!')\n #return render(request, 'userApp/reg.html', {'form': form1})\n else:\n form = UserCreationForm()\n return render(request, 'userApp/create_user.html', {'form': form})\n\n\n\[email protected]\n@login_required\ndef update_profile(request):\n user_form = UserForm(request.POST, instance=request.user)\n profile_form = ProfileForm(request.POST, request.FILES, instance=request.user.profile)\n if request.method == 'POST':\n #user_form = UserForm(request.POST, instance=request.user)\n profile_form = ProfileForm(request.POST, request.FILES, instance=request.user.profile)\n \n\n if user_form.is_valid() and profile_form.is_valid():\n #profile = PersonProfile(image = request.FILES['image'])\n\n profile = profile_form.save(commit=False)\n profile.user = request.user\n profile.save()\n user_form.save()\n #profile_form.save()\n\n #return redirect('settings:profile')\n #return HttpResponse('success!')\n return redirect('/feed/')\n\n else:\n return render(request, 'userApp/reg.html', {\n 'user_form': user_form,\n 'profile_form': profile_form,\n })\n #return HttpResponse('test!')\n\n# @login_required\n# def newevent(request):\n# events = EventForm()\n# return render(request, 'userApp/newevent.html', {'events':events})\n\n@login_required\ndef newevent(request):\n eventform = EventForm()\n event = EventProfile.objects.get_or_create(company=request.u)\n if request.method == 'POST':\n #user_form = UserForm(request.POST, instance=request.user)\n\n if user_form.is_valid() and profile_form.is_valid():\n\n profile = profile_form.save(commit=False)\n profile.company = request.user\n profile.save()\n\n\n return redirect('/feed/')\n\n else:\n return render(request, 'userApp/reg.html', {'events':eventform})\n\n\nclass MusiciansList(ListView):\n model = PersonProfile\n #paginate_by = 10 # if pagination is desired\n context_object_name = 'musician_list'\n template_name = 'userApp/feed.html'\n\n def get_queryset(self):\n result = super(MusiciansList, self).get_queryset()\n query = self.request.GET.get('q')\n instrs = self.request.GET.get('instrs')\n genres = self.request.GET.get('genres')\n\n if (not query):\n query=''\n if(not instrs):\n instrs=''\n if(not genres):\n genres=''\n \n result = PersonProfile.objects.filter(Q(nickname__icontains=query) & Q(instruments__icontains=instrs) & Q(genres__icontains=genres))\n #result = result.filter(Q(nickname__icontains=query) & Q(instruments__icontains=instruments)\n\n return result\n\n \n\n\ndef profile(request, person_id):\n persondetail = get_object_or_404(PersonProfile, pk=person_id)\n userdetail = persondetail.user\n\n return render(request, 'userApp/profile.html', \n {'profile':persondetail,\n 'userprofile':userdetail,\n })\n\n\n'''\ndef registration(request):\n if request.method == \"POST\":\n name = request.POST.get(\"name\")\n #registertime = request.POST.get(\"registertime\")\n date = request.POST.get(\"date\")\n password = request.POST.get(\"password\")\n spec = request.POST.get(\"spec\")\n image= request.POST.get(\"image\")\n p = Person()\n p.name=name\n p.registertime=datetime.datetime.utcnow().replace(tzinfo=utc)\n p.date=date\n p.spec=spec\n p.phone=1337\n p.description='description'\n p.email='nu da'\n p.clean()\n p.image=image\n\n p.save()\n # age = request.POST.get(\"age\") # получение значения поля age\n return HttpResponse(\"<h2>Hello, {0}</h2>\".format(name))\n else:\n personform = PersonForm()\n return render(request, \"userApp/reg.html\", {\"form\": personform})\n\ndef login(request):\n pass\n'''"
},
{
"alpha_fraction": 0.6775320172309875,
"alphanum_fraction": 0.6775320172309875,
"avg_line_length": 41.849998474121094,
"blob_id": "c8750acfac9a98b28358ec9aa05a348e3d41cc47",
"content_id": "eb9e1ba0c5248945b72ce6c999723e1bdd317d94",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 859,
"license_type": "no_license",
"max_line_length": 71,
"num_lines": 20,
"path": "/diogen/userApp/urls.py",
"repo_name": "BabyRage21/Project_Dio",
"src_encoding": "UTF-8",
"text": "from django.urls import path, include, re_path\nfrom django.views.generic import ListView, DetailView\nfrom . import views \nfrom django.conf.urls.static import static\nfrom django.conf import settings\n\nurlpatterns = [\n path('', views.mainpage , name='mainpage'),\n path('', include('django.contrib.auth.urls')),\n path('reg/', views.registration , name='registration'), \n #TEMP\n #re_path(r'^feed/([\\w-]+)/$', views.MusiciansList.as_view()),\n path('feed/', views.MusiciansList.as_view(), name='MusiciansList'),\n path('reg/upd/', views.update_profile , name='update_profile'),\n path('<int:person_id>/', views.profile, name=\"profile\"),\n path('newevent/', views.newevent, name=\"newevent\"),\n path('feed/search/', views.MusiciansList.as_view(), name=\"search\")\n \n \n] + static(settings.MEDIA_URL, document_root = settings.MEDIA_ROOT)\n\n\n"
},
{
"alpha_fraction": 0.544056236743927,
"alphanum_fraction": 0.5451549291610718,
"avg_line_length": 32.955223083496094,
"blob_id": "8861b9e34486d9b0e16ef4e165cbe7489f17b94e",
"content_id": "8247d9b50720a0376cd32fa346c0cdb1cdcf7669",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 5010,
"license_type": "no_license",
"max_line_length": 134,
"num_lines": 134,
"path": "/diogen/userApp/forms.py",
"repo_name": "BabyRage21/Project_Dio",
"src_encoding": "UTF-8",
"text": "from django import forms\n#from django import ValidationError\nfrom .models import *\nfrom django.contrib.auth.models import User\n\n\n\nclass UserForm(forms.ModelForm):\n class Meta:\n model = User\n fields = ('first_name', 'last_name', 'email')\n labels = {\n 'first_name':'Имя',\n 'last_name':'Фамилия',\n }\n required={\n 'first_name':'True',\n 'last_name':'True',\n }\n\nclass ProfileForm(forms.ModelForm):\n class Meta:\n model = PersonProfile\n fields = ('birth_date', 'adress', 'phone', 'description','image', 'nickname','genres', 'instruments', 'soundcloud', 'company')\n labels = {\n 'birth_date': 'Дата рождения',\n 'adress': 'Укажите свой город',\n 'phone': 'Телефон',\n 'description': 'Расскажите пару слов о себе',\n\n 'nickname': 'Ваш творческий ник:',\n 'genres': 'Жанры Вашего творчества',\n 'instruments':'Инструменты которыми вы владете',\n 'soundcloud':'Ссылка на soundcloud (опционально)',\n \n 'company':'Название вашей компании',\n }\n widgets = {\n 'birth_date': forms.TextInput(attrs={'class':'datepicker'}),\n 'description': forms.Textarea(attrs={'class':'materialize-textarea'})\n }\n initial = {\n 'birth_date': ''\n }\n\nclass EventForm(forms.ModelForm):\n class Meta:\n model = EventProfile\n fields = ('address', 'description', 'group', 'company', 'date')\n labels = {\n 'date': 'Время проведения',\n 'address': 'Место проведения',\n 'group': 'Выступает',\n 'place': 'Проводит',\n 'description': 'Описание'\n }\n widgets = {\n 'date': forms.TextInput(attrs={'class':'datepicker'}),\n 'description': forms.Textarea(attrs={'class':'materialize-textarea', 'id':'desc'}),\n 'address': forms.TextInput(attrs={'disabled':''})\n }\n initial = {\n 'date': ''\n }\n\n# class MusicianForm(forms.ModelForm):\n# class Meta:\n# model = MusicianProfile\n# fields = ('nickname', 'genres', 'instruments', 'soundcloud', 'birth_date', 'adress', 'phone', 'description','image')\n# labels = {\n# 'nickname': 'Ваш творческий ник:',\n# 'genres': 'Жанры Вашего творчества',\n# 'instruments':'Инструменты которыми вы владете',\n# 'soundcloud':'Ссылка на soundcloud (опционально)',\n# 'birth_date': 'Дата рождения',\n# 'adress': 'Укажите свой город',\n# 'phone': 'Телефон',\n# 'description': 'Расскажите пару слов о себе',\n# }\n# widgets = {\n# 'birth_date': forms.TextInput(attrs={'class':'datepicker'}),\n# }\n# initial = {\n# 'birth_date': ''\n# }\n \n# class CompanyForm(forms.ModelForm):\n# class Meta:\n# model = CompanyProfile\n# fields = ('company', 'adress', 'phone', 'description','image')\n# labels = {\n# 'company':'Название вашей компании',\n# 'birth_date': 'Дата рождения',\n# 'adress': 'Укажите свой город',\n# 'phone': 'Телефон',\n# 'description': 'Расскажите пару слов о себе',\n# }\n# widgets = {\n# 'birth_date': forms.TextInput(attrs={'class':'datepicker'}),\n# }\n# initial = {\n# 'birth_date': ''\n# } \n\n\n'''\nclass PersonForm(forms.Form):\n \n\n #name=forms.CharField(required=True, max_length=200, label='Введите имя:', error_messages={'required': 'Please enter your name'})\n\n #password=forms.CharField(widget = forms.PasswordInput, required=True)\n #confirm_password = forms.CharField(widget = forms.PasswordInput, required=True)\n date=forms.CharField(widget=forms.TextInput(attrs={'class':'datepicker'}), required=True)\n \n #spec=forms.IntegerField()\n #phone=forms.CharField(max_length=20)\n \n description=forms.CharField(widget = forms.Textarea, initial ='', required=False)\n image = forms.ImageField(initial ='/pic_folder/None/no_img.jpg')\n\n\n\n \n def clean_password(self):\n cleaned_data = super(PersonForm, self).clean()\n password = cleaned_data.get(\"password\")\n confirm_password = cleaned_data.get(\"confirm_password\")\n\n if password != confirm_password:\n raise forms.ValidationError(\n \"password and confirm_password does not match\"\n )\n'''\n\n"
},
{
"alpha_fraction": 0.4278438091278076,
"alphanum_fraction": 0.441426157951355,
"avg_line_length": 17.4375,
"blob_id": "5b1dc2a62f5bfda2e386591979dcbd06a06005bd",
"content_id": "95ee63229f26235416dbc14497b8980687116ba6",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "HTML",
"length_bytes": 597,
"license_type": "no_license",
"max_line_length": 76,
"num_lines": 32,
"path": "/diogen/userApp/templates/userApp/profile.html",
"repo_name": "BabyRage21/Project_Dio",
"src_encoding": "UTF-8",
"text": "{% extends \"mainPage/wrapper.html\" %}\n\n\n\n{% block page %}\n\n<!doctype html>\n<html>\n <head>\n <title> {{ profile.name }}</title>\n </head>\n<body>\n <h1> {{ profile.name }}</h1>\n <p>{{ profile.registertime }}</p>\n\n <center>\n <img src = \"{{ profile.image.url }}\" height=”300” width=”400”/>\n {{ userprofile.email}}\n {{userprofile.first_name}}\n {{userprofile.last_name}}\n {{profile.instruments}}\n \n \n \n \n </center>\n<br/>\n\n </body>\n</html>\n\n{%endblock%}"
},
{
"alpha_fraction": 0.821052610874176,
"alphanum_fraction": 0.821052610874176,
"avg_line_length": 27.600000381469727,
"blob_id": "82dddfc7271c963cb02b5b512b3f50a7db69347e",
"content_id": "f994f1847dbfc347e771fd8528d951a8407b6629",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 285,
"license_type": "no_license",
"max_line_length": 46,
"num_lines": 10,
"path": "/diogen/userApp/admin.py",
"repo_name": "BabyRage21/Project_Dio",
"src_encoding": "UTF-8",
"text": "from django.contrib import admin\nfrom userApp.models import *\n# Register your models here.\n\n#admin.site.register(Person, PersonModelAdmin)\nadmin.site.register(PersonProfile)\n\nadmin.site.register(EventProfile)\n# admin.site.register(MusicianProfile)\n# admin.site.register(CompanyProfile)"
}
] | 6 |
farimano/german-review-analysis
|
https://github.com/farimano/german-review-analysis
|
490b206a0e3c60959e85938bde1707dd6b8c0c87
|
9cee4d8838e7a195a328a4b2f24410965c45aa77
|
b43a2ba2dc7b343d7d87ba7a14a7c13be7c155d5
|
refs/heads/main
| 2023-05-14T06:28:00.386455 | 2021-06-01T20:57:40 | 2021-06-01T20:57:40 | 372,930,819 | 1 | 0 | null | null | null | null | null |
[
{
"alpha_fraction": 0.7964912056922913,
"alphanum_fraction": 0.8052631616592407,
"avg_line_length": 141.5,
"blob_id": "6a35de2610b6a2588cd45b05979835e81ad5904c",
"content_id": "768e74d92892dcad1f69185358a8959cca859d11",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Markdown",
"length_bytes": 570,
"license_type": "no_license",
"max_line_length": 382,
"num_lines": 4,
"path": "/README.md",
"repo_name": "farimano/german-review-analysis",
"src_encoding": "UTF-8",
"text": "# analysis_of_german_reviews\nTone analysis of German reviews on smartphones (More details in 05_tone_analysis.ipynb). This work also includes data visualization and tests hypothesis about whether Samsumg and Apple have the same level of user satisfaction or not (More details in 02_data_analysis.ipynb). All data were received by web scraping, with help of such popular Python libraries like bs4 and selenium. \n \nOn this link you can see demonstration and brief review of this project - https://share.streamlit.io/farimano/analysis_of_german_reviews/main/demo/demo.py\n"
},
{
"alpha_fraction": 0.6160565614700317,
"alphanum_fraction": 0.6297836899757385,
"avg_line_length": 26.31818199157715,
"blob_id": "be4c4ca3ddf47f90573db2bfc7a7e5fe8794f70d",
"content_id": "325bb6ad52c098016bf5867634da565618b70758",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 2409,
"license_type": "no_license",
"max_line_length": 159,
"num_lines": 88,
"path": "/demo/demo.py",
"repo_name": "farimano/german-review-analysis",
"src_encoding": "UTF-8",
"text": "import re\nfrom joblib import load\nimport pandas as pd\nimport spacy\nfrom PIL import Image\n\nimport spacy.cli \nspacy.cli.download(\"de_core_news_sm\")\n\nimport streamlit as st\n\n\nst.title('Tone analysis of smartphone reviews on German')\n\nst.markdown(\n \"\"\"\n <style>\n .reportview-container {\n background: url(\"https://www.myclickmagazine.com/wp-content/uploads/2019/02/Photographing_Minimalism_Composition_Negative_Space_by_Dana_Walton_18.jpg\")\n }\n .font-1 {\n font-size:30px !important;\n color: darkblue;\n }\n\n .font-2 {\n font-size:30px !important;\n color: forestgreen;\n }\n\n .font-3 {\n font-size:20px !important;\n color: black;\n }\n \"\"\",\n unsafe_allow_html=True\n)\n\nmodel = load('demo/model.joblib')\n\npunct_list = '[\"#$%&\\'()*+,-./:;<=>@[\\]^_`{|}~’‘´`\\']'\nnlp = spacy.load('de_core_news_sm')\nd = {0:'negative review.', 1:'positive review.'}\n\noption = st.text_input('Type in your review on smartphone (only German language supported)')\n\nX_test = option\n\ns = 'Review:\\t' + X_test\nst.markdown(f'<p class=\"font-1\">{s}</p>', unsafe_allow_html=True)\n\nif X_test:\n \n X_test = X_test.lower()\n X_test = re.sub(punct_list, \"\", X_test)\n X_test = re.sub(\"\\d+\", \"\", X_test)\n X_test = \" \".join([token.lemma_ for token in nlp(X_test)])\n\n pred = model.predict([X_test])\n pred = d[pred[0]]\n confidence = abs(model.predict_proba([X_test])[0][0] - 0.5)\n\n if confidence < 0.1:\n confidence = 'Maybe it is'\n elif confidence > 0.4:\n confidence = 'It is almost sure that it is'\n else:\n confidence = 'It is possible that it is'\n \n s = \"Conclusion:\\t\" + \" \".join([confidence, pred])\n\n st.markdown(f'<p class=\"font-2\">{s}</p>', unsafe_allow_html=True)\n\ntop = pd.read_csv('demo/top.csv')\n\nif st.checkbox('Show top-20 collocations for positive and negative reviews'):\n top\n\nif st.checkbox('Show visual analysis of brands'):\n for i in ['Price_dist_brand.png', 'Rate_dist_brands.png', 'review_dist.png']:\n image = Image.open(f'demo/{i}')\n st.image(image)\n\nif st.checkbox('Show the comparison between rates of Apple and Samsung'):\n image = Image.open('demo/samsung_apple.png')\n st.image(image)\n s = 'Using Mann-Whitney\\'s Test, we can establish that rates of user satisfaction are rather different for Apple and Samsung.'\n st.markdown(f'<p class=\"font-3\">{s}</p>', unsafe_allow_html=True)\n"
},
{
"alpha_fraction": 0.44859811663627625,
"alphanum_fraction": 0.672897219657898,
"avg_line_length": 14.428571701049805,
"blob_id": "6bd628a187c16848357e78a8b9aecda0a3366e12",
"content_id": "44bf4248ba79d21784bdefd1f2e5f75ccec3a4bd",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Text",
"length_bytes": 107,
"license_type": "no_license",
"max_line_length": 20,
"num_lines": 7,
"path": "/demo/requirements.txt",
"repo_name": "farimano/german-review-analysis",
"src_encoding": "UTF-8",
"text": "Pillow==8.2.0\nstreamlit==0.81.0\njoblib==1.0.1\npandas==1.2.4\nnumpy==1.20.1\nscikit-learn==0.24.1\nspacy==2.3.5"
}
] | 3 |
phoenixx1/Market-Analysis
|
https://github.com/phoenixx1/Market-Analysis
|
31ec0f5061e3968a734d5c771f58b2cb1dd750ed
|
2da8086a7001105824785f2029076cf04ef16b46
|
06181c1b68fc72ec2cd22eba233a8ddaf203e28c
|
refs/heads/master
| 2023-04-11T14:25:03.367165 | 2021-04-11T15:03:14 | 2021-04-11T15:03:14 | 321,118,780 | 0 | 1 | null | 2020-12-13T17:18:43 | 2021-04-11T14:31:52 | 2021-04-11T15:00:51 |
JavaScript
|
[
{
"alpha_fraction": 0.6180555820465088,
"alphanum_fraction": 0.6284722089767456,
"avg_line_length": 22.25,
"blob_id": "6eb4c14fc7d8dc75b3e5d2117ba0f34f90b0131e",
"content_id": "eace1a97757dbe06f82b52b6edf67186adea52fb",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 288,
"license_type": "no_license",
"max_line_length": 45,
"num_lines": 12,
"path": "/API/technicals/indicators.py",
"repo_name": "phoenixx1/Market-Analysis",
"src_encoding": "UTF-8",
"text": "from flask_restful import Resource\nfrom flask import jsonify\nimport pandas as pd\nimport json\nimport talib\n\nclass Studies(Resource):\n def get(self, cName: str):\n p = open('./data/' + cName + '.json')\n prices = json.load(p)\n\n return {'prices': prices}, 200\n \n"
},
{
"alpha_fraction": 0.5562353134155273,
"alphanum_fraction": 0.5609411597251892,
"avg_line_length": 24.60240936279297,
"blob_id": "df3896b9ac043557f3afedf7c7a44750116e33c6",
"content_id": "3663bbb1624348da064ad2eb4fbf08ff1ecbdf94",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 2125,
"license_type": "no_license",
"max_line_length": 96,
"num_lines": 83,
"path": "/API/test.py",
"repo_name": "phoenixx1/Market-Analysis",
"src_encoding": "UTF-8",
"text": "import pandas as pd\nimport talib\nimport json\nimport numpy as np\nimport glob\n\n# Auto, Banks, Banks-PSU, Beverages, Chemicals\n# Computers, Construction, Consumer Goods, Finance, FMCG\n\ncompany_list = glob.glob(\"NewData/*.csv\")\n\ndef applyStudy(companyName, studyName):\n with open('talib.json') as f:\n studies = json.load(f)\n\n data = pd.read_csv(str(companyName))\n\n data[\"Volume\"] = data[\"Volume\"].astype('float')\n data[\"Open\"] = data[\"Open\"].astype('float')\n data[\"High\"] = data[\"High\"].astype('float')\n data[\"Low\"] = data[\"Low\"].astype('float')\n data[\"Close\"] = data[\"Close\"].astype('float')\n\n\n argument_list = studies[studyName]\n argument_data = []\n for arg in argument_list:\n argument_data.append(data[arg].to_numpy())\n\n\n return_list = getattr(talib, studyName)(*argument_data)\n ret = {}\n\n if type(return_list) is tuple:\n for i in range(0, len(return_list)):\n ret[i] = return_list[i].tolist()\n return_q = len(return_list)\n\n else:\n ret = return_list.tolist()\n return_q = 1\n\n return ret\n\ndef ret_csv(cName: str):\n with open('talib.json') as f:\n studies = json.load(f)\n\n data = pd.read_csv(cName)\n \n for i in studies.keys():\n study = applyStudy(cName, i)\n if type(study) == list:\n study = np.around(np.array(study),2)\n data[i] = study\n if type(study) == dict:\n for key in study.keys():\n data[str(i) + str(key)] = study[key]\n data.to_csv('a/' + cName[8:], index=False)\n\nfor i in company_list:\n try:\n ret_csv(i)\n except:\n print(i)\n\n\n# import pandas as pd\n# import glob\n\n# clist = glob.glob(\"../NSE/*.csv\")\n\n# for company in clist:\n# df = pd.read_csv(company)\n\n# if \"Open\" not in df.columns:\n# df.columns = [\"Date\", \"Open\", \"High\", \"Low\", \"Close\", \"Volume\"]\n \n# df['Date'] = df['Date'].apply(str)\n# for i in range(0, len(df['Date'])):\n# df['Date'][i] = df['Date'][i][:4] + '-' + df['Date'][i][4:6] + '-' + df['Date'][i][6:]\n\n# df.to_csv('data/' + str(company[7:]), index = False)\n"
},
{
"alpha_fraction": 0.6440993547439575,
"alphanum_fraction": 0.6844720244407654,
"avg_line_length": 40.30769348144531,
"blob_id": "682ba81e738bbdce911aa326679ee3a20216d4e5",
"content_id": "09b2d0888f264079ffd767b6680af84a43e08a21",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 1610,
"license_type": "no_license",
"max_line_length": 83,
"num_lines": 39,
"path": "/API/models/sd.py",
"repo_name": "phoenixx1/Market-Analysis",
"src_encoding": "UTF-8",
"text": "import numpy as np\nimport pandas as pd\nimport matplotlib.pyplot as plt\nimport seaborn as sns\n\ndata = pd.read_csv(\"../data/AXISBANK.csv\", parse_dates=['Date'], index_col='Date', \n usecols=['Date', 'Open','High','Low','Close','Volume'])\n\ndata = data.reset_index()\nlag_features = [\"Open\", \"High\", \"Low\", \"Close\", \"VWAP\", \"Volume\"]\nwindow1 = 3\nwindow2 = 7\nwindow3 = 30\n\ndf_rolled_3d = data[lag_features].rolling(window=window1, min_periods=0)\ndf_rolled_7d = data[lag_features].rolling(window=window2, min_periods=0)\ndf_rolled_30d = data[lag_features].rolling(window=window3, min_periods=0)\n\ndf_mean_3d = df_rolled_3d.mean().shift(1).reset_index().astype(np.float32)\ndf_mean_7d = df_rolled_7d.mean().shift(1).reset_index().astype(np.float32)\ndf_mean_30d = df_rolled_30d.mean().shift(1).reset_index().astype(np.float32)\n\ndf_std_3d = df_rolled_3d.std().shift(1).reset_index().astype(np.float32)\ndf_std_7d = df_rolled_7d.std().shift(1).reset_index().astype(np.float32)\ndf_std_30d = df_rolled_30d.std().shift(1).reset_index().astype(np.float32)\n\nfor feature in lag_features:\n data[f\"{feature}_mean_lag{window1}\"] = df_mean_3d[feature]\n data[f\"{feature}_mean_lag{window2}\"] = df_mean_7d[feature]\n data[f\"{feature}_mean_lag{window3}\"] = df_mean_30d[feature]\n \n data[f\"{feature}_std_lag{window1}\"] = df_std_3d[feature]\n data[f\"{feature}_std_lag{window2}\"] = df_std_7d[feature]\n data[f\"{feature}_std_lag{window3}\"] = df_std_30d[feature]\n\ndata.set_index(\"Date\", drop=False, inplace=True)\ndata.interpolate(method='time', inplace=True)\ndata.fillna(data.mean(), inplace=True)\ndata.head()"
},
{
"alpha_fraction": 0.5043731927871704,
"alphanum_fraction": 0.5663265585899353,
"avg_line_length": 18.600000381469727,
"blob_id": "931431fd95bf0166416bd0855f044601c4a87f98",
"content_id": "7eedbc5ef983bc2e007749ae577454c8743f823c",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "JavaScript",
"length_bytes": 1372,
"license_type": "no_license",
"max_line_length": 79,
"num_lines": 70,
"path": "/Frontend/analysis/src/components/News/NewsCard.js",
"repo_name": "phoenixx1/Market-Analysis",
"src_encoding": "UTF-8",
"text": "import React from \"react\";\nimport styled from \"styled-components\";\n\nfunction NewsCard({ url, image, title, source, desc, time }) {\n return (\n <NewsCardContainer\n id=\"container\"\n onClick={() => window.open(url, \"_blank\")}\n >\n <NewsImage>\n <img src={image} alt={source} />\n </NewsImage>\n\n <NewsDetails>\n <h4>{title}</h4>\n <span>\n <strong>Source: {source}</strong>\n </span>\n <p>{desc}</p>\n <br />\n <span>\n <strong>Published at: {time}</strong>\n </span>\n </NewsDetails>\n </NewsCardContainer>\n );\n}\n\nexport default NewsCard;\n\nconst NewsCardContainer = styled.div`\n display: flex;\n cursor: pointer;\n width: 1000px;\n background: rgba(118, 123, 145, 0.5);\n padding: 10px;\n box-shadow: 0 4px 8px 0 rgba(0, 0, 0, 0.7), 0 6px 20px 0 rgba(0, 0, 0, 0.19);\n align-items: center;\n justify-content: center;\n margin: 10px;\n\n @media screen and (max-width: 1000px) {\n width: 800px;\n }\n @media screen and (max-width: 800px) {\n width: 600px;\n }\n\n :hover {\n background: rgba(134, 140, 152, 0.6);\n transform: scale(1.01);\n }\n`;\nconst NewsImage = styled.div`\n flex: 0.3;\n > img {\n object-fit: fill;\n width: 256px;\n height: 148px;\n }\n`;\n\nconst NewsDetails = styled.div`\n flex: 0.7;\n margin-left: 14px;\n\n > span {\n color: #51545a;\n }\n`;\n"
},
{
"alpha_fraction": 0.5724999904632568,
"alphanum_fraction": 0.5833333134651184,
"avg_line_length": 19.3389835357666,
"blob_id": "5b54c4f6b6213dcb1e58e7950d49f167a7d79ce4",
"content_id": "ffa1be948cf75708db9f2dc3c5de291b606ab725",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "JavaScript",
"length_bytes": 1200,
"license_type": "no_license",
"max_line_length": 61,
"num_lines": 59,
"path": "/Frontend/analysis/src/components/News/index.js",
"repo_name": "phoenixx1/Market-Analysis",
"src_encoding": "UTF-8",
"text": "import React from \"react\";\nimport NewsCard from \"./NewsCard\";\nimport styled from \"styled-components\";\nimport { connect } from \"react-redux\";\nimport { fetchNews } from \"../../actions\";\n\nclass News extends React.Component {\n componentDidMount() {\n this.props.fetchNews();\n }\n\n renderList() {\n return this.props.news.map((head) => {\n return head.map((n) => {\n return (\n <NewsCard\n url={n.url}\n image={n.urlToImage}\n title={n.title}\n source={n.source.name}\n desc={n.description}\n time={n.publishedAt}\n />\n );\n });\n });\n }\n\n render() {\n return (\n <NewsContainer>\n <NewsCards>{this.renderList()}</NewsCards>\n </NewsContainer>\n );\n }\n}\n\nconst mapStateToProps = (state) => {\n return { news: state.news };\n};\n\nexport default connect(mapStateToProps, { fetchNews })(News);\n\nconst NewsContainer = styled.div`\n background: #e3e3e3;\n height: 5000px;\n width: 100%;\n display: flex;\n flex-direction: column;\n align-items: center;\n`;\n\nconst NewsCards = styled.div`\n z-index: 1;\n height: 60vh;\n place-items: center;\n display: flex;\n flex-direction: column;\n`;\n"
},
{
"alpha_fraction": 0.6383763551712036,
"alphanum_fraction": 0.6494464874267578,
"avg_line_length": 23.636363983154297,
"blob_id": "56a67bb8097b2674f32109d1732c6a0523d35452",
"content_id": "b038f26b1bd7384265bcbdf9fa182b60b30c09f9",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 271,
"license_type": "no_license",
"max_line_length": 47,
"num_lines": 11,
"path": "/API/data_loading/company_data.py",
"repo_name": "phoenixx1/Market-Analysis",
"src_encoding": "UTF-8",
"text": "from flask_restful import Resource\nfrom flask import jsonify\nimport pandas as pd\nimport json\n\nclass CompanyData(Resource):\n def get(self, name: str):\n p = open('data_json/' + name + '.json')\n prices = json.load(p)\n\n return {'prices': prices}, 200\n"
},
{
"alpha_fraction": 0.5788113474845886,
"alphanum_fraction": 0.5822566747665405,
"avg_line_length": 23.70212745666504,
"blob_id": "5410ca98e5d726766d022546e8ae7371c316e331",
"content_id": "3024ffeab7e24e8e36a3ab37ca1a425c78991d25",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "JavaScript",
"length_bytes": 1161,
"license_type": "no_license",
"max_line_length": 62,
"num_lines": 47,
"path": "/Frontend/analysis/src/components/Dashboard/prediction/AnalysisPopup.js",
"repo_name": "phoenixx1/Market-Analysis",
"src_encoding": "UTF-8",
"text": "import React, { useState } from \"react\";\nimport { Button, Modal } from \"react-bootstrap\";\nimport styled from \"styled-components\";\nimport RenderChart from \"./RenderChart\";\n\nfunction AnalysisPopup() {\n const [show, setShow] = useState(false);\n const handlePopup = () => setShow(!show);\n\n return (\n <AnalysisPopupContainer>\n <Button variant=\"secondary\" onClick={handlePopup}>\n ARIMA\n </Button>\n\n <Modal\n size=\"lg\"\n // dialogClassName=\"modal-90w\"\n show={show}\n onHide={handlePopup}\n backdrop=\"static\"\n keyboard={false}\n >\n <Modal.Header>\n <Modal.Title>ARIMA</Modal.Title>\n </Modal.Header>\n <Modal.Body>\n <RenderChart />\n </Modal.Body>\n <Modal.Footer>\n <Button variant=\"secondary\" onClick={handlePopup}>\n Close\n </Button>\n {/* <Button variant=\"primary\" onClick={handleClose}>\n Save Changes\n </Button> */}\n </Modal.Footer>\n </Modal>\n </AnalysisPopupContainer>\n );\n}\n\nexport default AnalysisPopup;\n\nconst AnalysisPopupContainer = styled.div`\n margin-left: 20px;\n`;\n"
},
{
"alpha_fraction": 0.5782024264335632,
"alphanum_fraction": 0.5796178579330444,
"avg_line_length": 25.16666603088379,
"blob_id": "1d01b2a6104c15bf7eaa81be5e91fd0e9edadfe1",
"content_id": "6f30255c57bbde77000a6a222a0fa21a9227df6d",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "JavaScript",
"length_bytes": 1413,
"license_type": "no_license",
"max_line_length": 76,
"num_lines": 54,
"path": "/Frontend/analysis/src/components/Dashboard/CustomizationNavChart.js",
"repo_name": "phoenixx1/Market-Analysis",
"src_encoding": "UTF-8",
"text": "import React from \"react\";\nimport { Dropdown, DropdownButton } from \"react-bootstrap\";\nimport { connect } from \"react-redux\";\nimport styled from \"styled-components\";\nimport { setChartType } from \"../../actions\";\n\nfunction CustomizationNav({ items, title, currentType, setChartType }) {\n const updateType = (event, item) => {\n event.preventDefault();\n\n setChartType(item);\n };\n const direction = \"right\";\n return (\n <DropDownContainer>\n <DropdownButton\n id=\"dropdown-variants-secondary\"\n variant=\"secondary\"\n title={title}\n >\n {/* <DropdownButton\n id=\"dropdown-variants-secondary\"\n variant=\"secondary\"\n title={title}\n drop=\"right\"\n >\n <Dropdown.Item>Hi</Dropdown.Item>\n </DropdownButton> */}\n {/* <Dropdown.Header>Head</Dropdown.Header>\n <Dropdown.Divider /> */}\n {items.map((item) => {\n return (\n <Dropdown.Item\n href=\"#\"\n onClick={(event) => updateType(event, item)}\n >\n {item}\n </Dropdown.Item>\n );\n })}\n </DropdownButton>\n </DropDownContainer>\n );\n}\n\nconst mapStateToProps = (state) => ({\n currentType: state.currentChartType,\n});\n\nexport default connect(mapStateToProps, { setChartType })(CustomizationNav);\n\nconst DropDownContainer = styled.div`\n margin-left: 20px;\n`;\n"
},
{
"alpha_fraction": 0.5109589099884033,
"alphanum_fraction": 0.515753448009491,
"avg_line_length": 11.920353889465332,
"blob_id": "94c0198706079064dc932020bdf3654519429aaa",
"content_id": "c61dad5c58fc4ae12445e661e8b4771c7a6df202",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "JavaScript",
"length_bytes": 1460,
"license_type": "no_license",
"max_line_length": 79,
"num_lines": 113,
"path": "/Frontend/analysis/src/components/Dashboard/studiesList.js",
"repo_name": "phoenixx1/Market-Analysis",
"src_encoding": "UTF-8",
"text": "export const OverlapStudies = [\n \"BBANDS\",\n \"DEMA\",\n \"EMA\",\n \"HT_TRENDLINE\",\n \"KAMA\",\n \"MAMA\",\n \"MIDPOINT\",\n \"MIDPRICE\",\n \"SAR\",\n \"SAREXT\",\n \"SMA\",\n \"T3\",\n \"TEMA\",\n \"TRIMA\",\n \"WMA\",\n];\nexport const Studies = [\n \"MIDPOINT\",\n \"MIDPRICE\",\n \"ADX\",\n \"ADXR\",\n \"APO\",\n \"AROON\",\n \"AROONOSC\",\n \"BOP\",\n \"CCI\",\n \"CMO\",\n \"DX\",\n \"MACD\",\n \"MACDEXT\",\n \"MACDFIX\",\n \"MFI\",\n \"MINUS_DI\",\n \"MINUS_DM\",\n \"MOM\",\n \"PLUS_DI\",\n \"PLUS_DM\",\n \"PPO\",\n \"ROC\",\n \"ROCP\",\n \"ROCR\",\n \"ROCR100\",\n \"RSI\",\n \"STOCH\",\n \"STOCHF\",\n \"STOCHRSI\",\n \"TRIX\",\n \"ULTOSC\",\n \"WILLR\",\n \"AD\",\n \"ADOSC\",\n \"OBV\",\n \"ATR\",\n \"NATR\",\n \"TRANGE\",\n \"AVGPRICE\",\n \"MEDPRICE\",\n \"TYPPRICE\",\n \"WCLPRICE\",\n \"HT_DCPERIOD\",\n \"HT_DCPHASE\",\n \"HT_PHASOR\",\n \"HT_SINE\",\n \"HT_TRENDMODE\",\n];\n\nexport const MomentumIndicators = [\n \"ADX\",\n \"ADXR\",\n \"APO\",\n \"AROON\",\n \"AROONOSC\",\n \"BOP\",\n \"CCI\",\n \"CMO\",\n \"DX\",\n \"MACD\",\n \"MACDEXT\",\n \"MACDFIX\",\n \"MFI\",\n \"MINUS_DI\",\n \"MINUS_DM\",\n \"MOM\",\n \"PLUS_DI\",\n \"PLUS_DM\",\n \"PPO\",\n \"ROC\",\n \"ROCP\",\n \"ROCR\",\n \"ROCR100\",\n \"RSI\",\n \"STOCH\",\n \"STOCHF\",\n \"STOCHRSI\",\n \"TRIX\",\n \"ULTOSC\",\n \"WILLR\",\n];\n\nexport const VolumeIndicators = [\"AD\", \"ADOSC\", \"OBV\"];\n\nexport const VolatilityIndicators = [\"ATR\", \"NATR\", \"TRANGE\"];\n\nexport const PriceTransform = [\"AVGPRICE\", \"MEDPRICE\", \"TYPPRICE\", \"WCLPRICE\"];\n\nexport const CycleIndicators = [\n \"HT_DCPERIOD\",\n \"HT_DCPHASE\",\n \"HT_PHASOR\",\n \"HT_SINE\",\n \"HT_TRENDMODE\",\n];\n"
},
{
"alpha_fraction": 0.47135335206985474,
"alphanum_fraction": 0.4837411046028137,
"avg_line_length": 27.57522201538086,
"blob_id": "25321c8c5f54fddafc941906f87eb936d497148b",
"content_id": "b730ab4d3d477afac730e198f2175138ed89c420",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "JavaScript",
"length_bytes": 3229,
"license_type": "no_license",
"max_line_length": 82,
"num_lines": 113,
"path": "/Frontend/analysis/src/components/Dashboard/Charts/ChartIndicators.js",
"repo_name": "phoenixx1/Market-Analysis",
"src_encoding": "UTF-8",
"text": "import React from \"react\";\nimport { format } from \"d3-format\";\nimport { LineSeries } from \"react-stockcharts/lib/series\";\nimport { sma } from \"react-stockcharts/lib/indicator\";\nimport Chart from \"react-stockcharts/lib/Chart\";\nimport XAxis from \"react-stockcharts/lib/axes/XAxis\";\nimport YAxis from \"react-stockcharts/lib/axes/YAxis\";\nimport MouseCoordinateY from \"react-stockcharts/lib/coordinates/MouseCoordinateY\";\nimport { Studies } from \"../studiesList\";\nimport MouseCoordinateX from \"react-stockcharts/lib/coordinates/MouseCoordinateX\";\nimport { timeFormat } from \"d3-time-format\";\n\nclass ChartIndicators extends React.Component {\n render() {\n const {\n data: initialData,\n indicators,\n updateIndicators,\n height,\n selectedStudy,\n } = this.props;\n\n // MIDPRICE\n const midpr50 = sma()\n .options({ windowSize: 50 })\n .merge((d, c) => {\n d.MIDPRICE = c;\n })\n .accessor((d) => d.MIDPRICE)\n .stroke(\"blue\");\n\n // MIDPOINT\n const midpt50 = sma()\n .options({ windowSize: 50 })\n .merge((d, c) => {\n d.MIDPOINT = c;\n })\n .accessor((d) => d.MIDPOINT)\n .stroke(\"red\");\n\n // ADX\n const adx50 = sma()\n .options({ windowSize: 50 })\n .merge((d, c) => {\n d.ADX = c;\n })\n .accessor((d) => d.ADX)\n .stroke(\"blue\");\n const midpriceChart = () => {\n console.log(\"call made\");\n switch (selectedStudy) {\n case \"MIDPRICE\":\n return (\n <Chart\n yExtents={[(d) => [d.MIDPRICE]]}\n // yExtents={yExtents}\n height={height * 0.25}\n origin={(w, h) => [0, height * 0.65]}\n >\n <MouseCoordinateX\n at=\"bottom\"\n orient=\"bottom\"\n displayFormat={timeFormat(\"%Y-%m-%d\")}\n />\n <MouseCoordinateY\n at=\"right\"\n orient=\"right\"\n displayFormat={format(\".2f\")}\n />\n <XAxis axisAt=\"bottom\" orient=\"bottom\" />\n\n <YAxis axisAt=\"right\" orient=\"right\" ticks={3} />\n <LineSeries\n yAccessor={midpr50.accessor()}\n stroke={midpr50.stroke()}\n />\n </Chart>\n );\n case \"ADX\":\n return (\n <Chart\n yExtents={[(d) => [d.ADX]]}\n height={height * 0.25}\n origin={(w, h) => [0, height * 0.65]}\n >\n <MouseCoordinateX\n at=\"bottom\"\n orient=\"bottom\"\n displayFormat={timeFormat(\"%Y-%m-%d\")}\n />\n <MouseCoordinateY\n at=\"right\"\n orient=\"right\"\n displayFormat={format(\".2f\")}\n />\n <XAxis axisAt=\"bottom\" orient=\"bottom\" />\n\n <YAxis axisAt=\"right\" orient=\"right\" ticks={3} />\n <LineSeries\n yAccessor={adx50.accessor()}\n stroke={adx50.stroke()}\n />\n </Chart>\n );\n default:\n return <></>;\n }\n };\n return <>{midpriceChart}</>;\n }\n}\n\nexport default ChartIndicators;\n"
},
{
"alpha_fraction": 0.8333333134651184,
"alphanum_fraction": 0.8333333134651184,
"avg_line_length": 24,
"blob_id": "dcb1099830473d755061c6b6f6e85a6cec90d780",
"content_id": "6d9773441ec2657889fd3a5165c581083f217609",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 24,
"license_type": "no_license",
"max_line_length": 24,
"num_lines": 1,
"path": "/API/models/__init__.py",
"repo_name": "phoenixx1/Market-Analysis",
"src_encoding": "UTF-8",
"text": "from .arima import ARIMA"
},
{
"alpha_fraction": 0.6570915579795837,
"alphanum_fraction": 0.6582884788513184,
"avg_line_length": 27.810344696044922,
"blob_id": "eca7b4efe9702eff71ccfde93fea22f4402b1b73",
"content_id": "efc2cb7344e8a8c610501ff15813d12b4e660797",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "JavaScript",
"length_bytes": 1671,
"license_type": "no_license",
"max_line_length": 80,
"num_lines": 58,
"path": "/Frontend/analysis/src/components/Dashboard/CustomizationNavStudy.js",
"repo_name": "phoenixx1/Market-Analysis",
"src_encoding": "UTF-8",
"text": "import React from \"react\";\nimport { Dropdown, DropdownButton } from \"react-bootstrap\";\nimport { connect } from \"react-redux\";\nimport styled from \"styled-components\";\nimport { loadStudies } from \"../../actions\";\nimport {\n CycleIndicators,\n MomentumIndicators,\n OverlapStudies,\n PriceTransform,\n VolatilityIndicators,\n VolumeIndicators,\n} from \"./studiesList\";\nimport StudyDropdown from \"./StudyDropdown\";\n\nfunction CustomizationNav({ title, study, loadStudies }) {\n const updateType = (event, item) => {\n event.preventDefault();\n loadStudies(item);\n };\n\n return (\n <DropDownContainer>\n <DropdownButton\n id=\"dropdown-variants-secondary\"\n variant=\"secondary\"\n title={title}\n style={{ display: \"flex\" }}\n >\n <StudyDropdown name={\"Overlap Studies\"} list={OverlapStudies} />\n <StudyDropdown name={\"Momentum Indicators\"} list={MomentumIndicators} />\n <StudyDropdown name={\"Volume Indicators\"} list={VolumeIndicators} />\n <StudyDropdown\n name={\"Volatility Indicators\"}\n list={VolatilityIndicators}\n />\n <StudyDropdown name={\"Price Transform\"} list={PriceTransform} />\n <StudyDropdown name={\"Cycle Indicators\"} list={CycleIndicators} />\n\n <Dropdown.Divider />\n\n <Dropdown.Item href=\"#\" onClick={(event) => updateType(event, \"Clear\")}>\n Clear\n </Dropdown.Item>\n </DropdownButton>\n </DropDownContainer>\n );\n}\n\nconst mapStateToProps = (state) => ({\n study: state.loadStudies,\n});\n\nexport default connect(mapStateToProps, { loadStudies })(CustomizationNav);\n\nconst DropDownContainer = styled.div`\n margin-left: 20px;\n`;\n"
},
{
"alpha_fraction": 0.6295811533927917,
"alphanum_fraction": 0.665248692035675,
"avg_line_length": 38.19230651855469,
"blob_id": "e9e0bb4ce9c040e06c5c5a5b44ab0a2d453945c9",
"content_id": "2210cfd26d381420594f8b8d8cb5df67ab7033d9",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 3056,
"license_type": "no_license",
"max_line_length": 104,
"num_lines": 78,
"path": "/API/models/testmodel.py",
"repo_name": "phoenixx1/Market-Analysis",
"src_encoding": "UTF-8",
"text": "import numpy as np\nimport pandas as pd\nimport matplotlib.pyplot as plt\nimport seaborn as sns\nimport ta\nimport pickle\n# # import missingno as msno\n# import plotly.express as px\n# import plotly.graph_objects as go\nimport matplotlib.dates as mdates\nimport scipy.stats\nfrom sklearn.metrics import mean_absolute_error, mean_squared_error\nimport pylab\nfrom pmdarima import auto_arima\n\ndf = pd.read_csv('../data/TATAMOTORS.csv')\n\ndf = df.reset_index()\n\na = ta.volume.VolumeWeightedAveragePrice(df.High, df.Low, df.Close, df.Volume)\ndf['VWAP'] = a.volume_weighted_average_price() \n\nlag_features = [\"High\", \"Low\", \"Volume\", \"Close\"]\nwindow1 = 3\nwindow2 = 7\nwindow3 = 30\n\ndf_rolled_3d = df[lag_features].rolling(window=window1, min_periods=0)\ndf_rolled_7d = df[lag_features].rolling(window=window2, min_periods=0)\ndf_rolled_30d = df[lag_features].rolling(window=window3, min_periods=0)\n\ndf_mean_3d = df_rolled_3d.mean().shift(1).reset_index().astype(np.float32)\ndf_mean_7d = df_rolled_7d.mean().shift(1).reset_index().astype(np.float32)\ndf_mean_30d = df_rolled_30d.mean().shift(1).reset_index().astype(np.float32)\n\ndf_std_3d = df_rolled_3d.std().shift(1).reset_index().astype(np.float32)\ndf_std_7d = df_rolled_7d.std().shift(1).reset_index().astype(np.float32)\ndf_std_30d = df_rolled_30d.std().shift(1).reset_index().astype(np.float32)\n\nfor feature in lag_features:\n df[f\"{feature}_mean_lag{window1}\"] = df_mean_3d[feature]\n df[f\"{feature}_mean_lag{window2}\"] = df_mean_7d[feature]\n df[f\"{feature}_mean_lag{window3}\"] = df_mean_30d[feature]\n \n df[f\"{feature}_std_lag{window1}\"] = df_std_3d[feature]\n df[f\"{feature}_std_lag{window2}\"] = df_std_7d[feature]\n df[f\"{feature}_std_lag{window3}\"] = df_std_30d[feature]\n\ndf.fillna(df.mean(), inplace=True)\n\ndf.set_index(\"Date\", drop=False, inplace=True)\n\ndf.Date = pd.to_datetime(df.Date, format=\"%Y-%m-%d\")\ndf[\"month\"] = df.Date.dt.month\ndf[\"week\"] = df.Date.dt.isocalendar().week\ndf[\"day\"] = df.Date.dt.day\ndf[\"day_of_week\"] = df.Date.dt.dayofweek\n\ndf_train = df[df.Date < \"2019\"]\ndf_valid = df[df.Date >= \"2019\"]\n\nexogenous_features = [\"High_mean_lag3\", \"High_std_lag3\", \"Low_mean_lag3\", \"Low_std_lag3\",\n \"Volume_mean_lag3\", \"Volume_std_lag3\", \"High_mean_lag7\", \"High_std_lag7\", \n \"Low_mean_lag7\", \"Low_std_lag7\", \"Volume_mean_lag7\", \"Volume_std_lag7\", \n \"High_mean_lag30\", \"High_std_lag30\", \"Low_mean_lag30\", \"Low_std_lag30\",\n \"Volume_mean_lag30\", \"Volume_std_lag30\", \"Close_mean_lag3\", \"Close_mean_lag7\",\n \"Close_mean_lag30\",\"Close_std_lag3\",\"Close_std_lag7\",\"Close_std_lag30\",\n \"month\",\"week\",\"day\",\"day_of_week\"]\n\n\nwith open('AXISBANK.pkl', 'rb') as pkl:\n # pickle_preds = pickle.load(pkl).predict(n_periods=5)\n forecast = pickle.load(pkl).predict(n_periods=len(df_valid), exogenous=df_valid[exogenous_features])\n df_valid[\"Forecast_ARIMAX\"] = forecast\n\n# print(df_valid[\"Forecast_ARIMAX\"])\ndf_valid[[\"VWAP\", \"Forecast_ARIMAX\"]].plot(figsize=(14, 7))\nplt.show()"
},
{
"alpha_fraction": 0.5813683867454529,
"alphanum_fraction": 0.600695788860321,
"avg_line_length": 24.116504669189453,
"blob_id": "0b15712614a3ccd2740ab7be30af1df3547dcba1",
"content_id": "39a7fd5a817d53b1e5fe9ff565bbbaf733091e07",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "JavaScript",
"length_bytes": 2587,
"license_type": "no_license",
"max_line_length": 76,
"num_lines": 103,
"path": "/Frontend/analysis/src/components/Home/Search.js",
"repo_name": "phoenixx1/Market-Analysis",
"src_encoding": "UTF-8",
"text": "import React from \"react\";\nimport styled from \"styled-components\";\nimport TextField from \"@material-ui/core/TextField\";\nimport Autocomplete from \"@material-ui/lab/Autocomplete\";\nimport { connect } from \"react-redux\";\nimport { fetchNames, setName } from \"../../actions\";\nimport { withRouter } from \"react-router-dom\";\n\nclass Search extends React.Component {\n componentDidMount() {\n this.props.fetchNames();\n }\n\n showResult(event, value) {\n const { history } = this.props;\n if (history) history.push(\"/dashboard\");\n let fullName = value.split(\":\").map((item) => item.trim());\n\n this.props.setName(fullName[1]);\n }\n\n render() {\n let companyList = [];\n this.props.names.map((name) => {\n return name.map((n) => {\n companyList.push(n.COMPANY + \" : \" + n.SYMBOL);\n });\n });\n\n return (\n <SearchContainer className=\"shadow-lg p-3 mb-5 rounded\">\n <SearchInnerContainer>\n <h1>Search</h1>\n\n <AutocompleteContainer\n freeSolo\n onChange={(event, value) => this.showResult(event, value)}\n disableClearable\n options={companyList}\n renderInput={(params) => (\n <AutoTextField\n id=\"standard-secondary\"\n {...params}\n label=\"Search Company Name\"\n margin=\"normal\"\n variant=\"outlined\"\n InputProps={{ ...params.InputProps, type: \"search\" }}\n />\n )}\n />\n </SearchInnerContainer>\n </SearchContainer>\n );\n }\n}\n\nconst mapStateToProps = (state) => {\n return { names: state.company, currentCompanyName: state.currentCompany };\n};\n\nexport default withRouter(\n connect(mapStateToProps, { fetchNames, setName })(Search)\n);\n\nconst AutocompleteContainer = styled(Autocomplete)`\n width: 600px;\n border-radius: 50px;\n`;\n\nconst AutoTextField = styled(TextField)`\n background-color: lightgray;\n border-radius: 50px;\n`;\n\nconst SearchContainer = styled.div`\n /* Use rgba value for not applying opacity property to child elements */\n /* background: rgba(54, 62, 74, 0.5); */\n height: 35vh;\n display: grid;\n place-items: center;\n`;\n\nconst SearchInnerContainer = styled.div`\n display: flex;\n align-items: center;\n flex-direction: column;\n padding: 100px;\n\n > h1 {\n color: #000;\n font-size: 80px;\n margin-top: -100px;\n\n @media screen and (max-width: 960px) {\n font-size: 50px;\n margin-top: -150px;\n }\n @media screen and (max-width: 768px) {\n font-size: 30px;\n margin-top: -100px;\n }\n }\n`;\n"
},
{
"alpha_fraction": 0.7914201021194458,
"alphanum_fraction": 0.7914201021194458,
"avg_line_length": 32.79999923706055,
"blob_id": "445b32b3b321a2c918152245781b827c89a2d35f",
"content_id": "e1d0147e1c89f8fd80bfe533caeb8aa3997488d3",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "JavaScript",
"length_bytes": 676,
"license_type": "no_license",
"max_line_length": 62,
"num_lines": 20,
"path": "/Frontend/analysis/src/reducers/index.js",
"repo_name": "phoenixx1/Market-Analysis",
"src_encoding": "UTF-8",
"text": "import { combineReducers } from \"redux\";\nimport nameReducer from \"./nameReducer\";\nimport newsReducer from \"./newsReducer\";\nimport priceReducer from \"./priceReducer\";\nimport selectedCompanyReducer from \"./selectedCompanyReducer\";\nimport chartReducer from \"./chartReducer\";\nimport studyReducer from \"./studyReducer\";\nimport setStudyReducer from \"./setStudyReducer\";\nimport arimaReducer from './arimaReducer';\n\nexport default combineReducers({\n company: nameReducer,\n news: newsReducer,\n prices: priceReducer,\n currentCompany: selectedCompanyReducer,\n currentChartType: chartReducer,\n loadStudies: studyReducer,\n selectedStudy: setStudyReducer,\n arima: arimaReducer,\n});\n"
},
{
"alpha_fraction": 0.5564774870872498,
"alphanum_fraction": 0.5800490379333496,
"avg_line_length": 38.2814826965332,
"blob_id": "d871f778a1ffd3f00fa8c385cee3c829dcca8103",
"content_id": "b2f9a076cc6403e7beafce2a2e6f52fc75dc66ba",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 5303,
"license_type": "no_license",
"max_line_length": 121,
"num_lines": 135,
"path": "/API/models/arima.py",
"repo_name": "phoenixx1/Market-Analysis",
"src_encoding": "UTF-8",
"text": "import numpy as np\nimport pandas as pd\nimport matplotlib.pyplot as plt\nimport ta\nimport pickle\nfrom pmdarima import auto_arima\nimport glob\n\n# For API\nfrom flask_restful import Resource\nfrom flask import jsonify\nimport json\n\nclass ARIMAModel:\n df = pd.DataFrame()\n df_train = pd.DataFrame()\n df_test = pd.DataFrame()\n \n exogenous_features = [\"High_mean_lag3\", \"High_std_lag3\", \"Low_mean_lag3\", \"Low_std_lag3\",\n \"Volume_mean_lag3\", \"Volume_std_lag3\", \"High_mean_lag7\", \"High_std_lag7\", \n \"Low_mean_lag7\", \"Low_std_lag7\", \"Volume_mean_lag7\", \"Volume_std_lag7\", \n \"High_mean_lag30\", \"High_std_lag30\", \"Low_mean_lag30\", \"Low_std_lag30\",\n \"Volume_mean_lag30\", \"Volume_std_lag30\", \"Close_mean_lag3\", \"Close_mean_lag7\",\n \"Close_mean_lag30\",\"Close_std_lag3\",\"Close_std_lag7\",\"Close_std_lag30\",\n \"month\",\"week\",\"day\",\"day_of_week\"]\n\n def __init__(self, cName: str):\n self.name = cName\n \n def load_data(self):\n self.df = pd.read_csv('data/' + self.name + '.csv')\n self.df = self.df.reset_index()\n\n a = ta.volume.VolumeWeightedAveragePrice(self.df.High, self.df.Low, self.df.Close, self.df.Volume)\n self.df['VWAP'] = a.volume_weighted_average_price() \n\n self.df_train = self.df[self.df.Date < \"2019\"]\n self.df_valid = self.df[self.df.Date >= \"2019\"]\n\n\n def add_lag_features(self):\n lag_features = [\"High\", \"Low\", \"Volume\", \"Close\"]\n window1 = 3\n window2 = 7\n window3 = 30\n\n df_rolled_3d = self.df[lag_features].rolling(window=window1, min_periods=0)\n df_rolled_7d = self.df[lag_features].rolling(window=window2, min_periods=0)\n df_rolled_30d = self.df[lag_features].rolling(window=window3, min_periods=0)\n\n df_mean_3d = df_rolled_3d.mean().shift(1).reset_index().astype(np.float32)\n df_mean_7d = df_rolled_7d.mean().shift(1).reset_index().astype(np.float32)\n df_mean_30d = df_rolled_30d.mean().shift(1).reset_index().astype(np.float32)\n\n df_std_3d = df_rolled_3d.std().shift(1).reset_index().astype(np.float32)\n df_std_7d = df_rolled_7d.std().shift(1).reset_index().astype(np.float32)\n df_std_30d = df_rolled_30d.std().shift(1).reset_index().astype(np.float32)\n\n for feature in lag_features:\n self.df[f\"{feature}_mean_lag{window1}\"] = df_mean_3d[feature]\n self.df[f\"{feature}_mean_lag{window2}\"] = df_mean_7d[feature]\n self.df[f\"{feature}_mean_lag{window3}\"] = df_mean_30d[feature]\n \n self.df[f\"{feature}_std_lag{window1}\"] = df_std_3d[feature]\n self.df[f\"{feature}_std_lag{window2}\"] = df_std_7d[feature]\n self.df[f\"{feature}_std_lag{window3}\"] = df_std_30d[feature]\n\n self.df.fillna(self.df.mean(), inplace=True)\n\n self.df.set_index(\"Date\", drop=False, inplace=True)\n\n self.df.Date = pd.to_datetime(self.df.Date, format=\"%Y-%m-%d\")\n self.df[\"month\"] = self.df.Date.dt.month\n self.df[\"week\"] = self.df.Date.dt.isocalendar().week\n self.df[\"day\"] = self.df.Date.dt.day\n self.df[\"day_of_week\"] = self.df.Date.dt.dayofweek\n \n def traintestsplit(self):\n self.df_train = self.df[self.df.Date < \"2019\"]\n self.df_valid = self.df[self.df.Date >= \"2019\"]\n\n def train_model(self):\n if ('models/trainedModels/' + self.name + '.pkl') in glob.glob('models/trainedModels/*.pkl'):\n with open('models/trainedModels/' + self.name + '.pkl', 'rb') as pkl:\n forecast = pickle.load(pkl).predict(n_periods=len(self.df_valid), \n exogenous=self.df_valid[self.exogenous_features])\n self.df_valid[\"Forecast_ARIMA\"] = forecast\n\n else:\n self.model = auto_arima(self.df_train.VWAP, exogenous=self.df_train[self.exogenous_features], \n trace=True, error_action=\"ignore\", suppress_warnings=True)\n self.model.fit(self.df_train.VWAP, exogenous=self.df_train[self.exogenous_features])\n self.save()\n\n forecast = self.model.predict(n_periods=len(self.df_valid), exogenous=self.df_valid[self.exogenous_features])\n self.df_valid[\"Forecast_ARIMA\"] = forecast\n\n # To plot\n # self.plotPred()\n return self.df_valid\n\n\n def save(self):\n with open('models/trainedModels/' + self.name + '.pkl', 'wb') as pkl:\n pickle.dump(self.model, pkl)\n\n def run(self):\n self.load_data()\n self.add_lag_features()\n self.traintestsplit()\n pred = self.train_model()\n \n return pred\n \n\n def plotPred(self):\n self.df_valid[[\"VWAP\", \"Forecast_ARIMA\"]].plot(figsize=(14, 7))\n plt.show()\n\n\nclass ARIMA(Resource):\n def get(self, name: str):\n model = ARIMAModel(name)\n pred = model.run()\n pred1 = pd.DataFrame()\n pred1['Date'] = pred['Date'].astype(str).values\n pred1['VWAP'] = pred['VWAP'].values\n pred1['Forecast_ARIMA'] = pred['Forecast_ARIMA'].values\n \n p = []\n\n for i in pred1.index:\n p.append(pred1.loc[i].to_dict())\n\n return p, 200\n"
},
{
"alpha_fraction": 0.6192560195922852,
"alphanum_fraction": 0.6236323714256287,
"avg_line_length": 36.75,
"blob_id": "345d32dadac701e065fc6f206f9b648ea66d797a",
"content_id": "b1adbf029170b3d0dab8a4e458590d3a8b23325d",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 457,
"license_type": "no_license",
"max_line_length": 81,
"num_lines": 12,
"path": "/API/data_pre_processing.py",
"repo_name": "phoenixx1/Market-Analysis",
"src_encoding": "UTF-8",
"text": "import pandas as pd \nimport glob\n\ncompany_list = glob.glob(\"NSE_Data/*.csv\")\n\nfor company in company_list:\n company_file = pd.read_csv(company)\n if (not any(cell.isdigit() for cell in company_file)) == False:\n company_file.columns = [\"Date\", \"Open\", \"High\", \"Low\", \"Close\", \"Volume\"]\n company_file.to_csv('NewData' + str(company[8:]), index = False)\n else:\n company_file.to_csv('NewData' + str(company[8:]), index = False)\n "
},
{
"alpha_fraction": 0.6659038662910461,
"alphanum_fraction": 0.6659038662910461,
"avg_line_length": 32.61538314819336,
"blob_id": "20ba946d19761566d5333fd388c033db7a21f34d",
"content_id": "561c8b7b287191af79c6873873b21301220adfe7",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "JavaScript",
"length_bytes": 1748,
"license_type": "no_license",
"max_line_length": 209,
"num_lines": 52,
"path": "/Frontend/analysis/src/actions/index.js",
"repo_name": "phoenixx1/Market-Analysis",
"src_encoding": "UTF-8",
"text": "import gitNames from \"../apis/gitNames\";\nimport newsAPI from \"../apis/newsAPI\";\nimport _ from \"lodash\";\nimport getPrices from \"../apis/getPrices\";\n\n// Used lodash library to solve the issue of refetching the same data after every render or change of page using the _.memoize library it does not call the same API again it checks if it has the values stored.\n\nexport const fetchNames = () => (dispatch) => {\n _fetchNames(dispatch);\n};\nconst _fetchNames = _.memoize(async (dispatch) => {\n const response = await gitNames.get(\"/companyList.json\");\n dispatch({ type: \"FETCH_NAMES\", payload: response.data });\n});\n\nexport const fetchNews = () => (dispatch) => {\n _fetchNews(dispatch);\n};\n\nconst _fetchNews = _.memoize(async (dispatch) => {\n const response = await newsAPI.get();\n dispatch({ type: \"FETCH_NEWS\", payload: response.data.articles });\n});\n\nexport const fetchPrices = (company) => async (dispatch) => {\n const response = await getPrices.get(`/data/${company}`);\n console.log(\"res: \", response.data.prices);\n dispatch({ type: \"FETCH_PRICES\", payload: response.data.prices });\n};\n\n\nexport const fetchArima = (company) => async (dispatch) => {\n const response = await getPrices.get(`/ARIMA/${company}`);\n console.log(\"res: \", response.data);\n dispatch({ type: \"FETCH_ARIMA\", payload: response.data });\n};\n\nexport const setName = (name) => (dispatch) => {\n dispatch({ type: \"SET_NAME\", payload: name });\n};\n\nexport const setChartType = (type) => (dispatch) => {\n dispatch({ type: \"SET_TYPE\", payload: type });\n};\n\nexport const loadStudies = (study) => (dispatch) => {\n dispatch({ type: \"LOAD_STUDY\", payload: study });\n};\n\nexport const latestStudy = (study) => (dispatch) => {\n dispatch({ type: \"SET_STUDY\", payload: study });\n};\n"
},
{
"alpha_fraction": 0.5922226905822754,
"alphanum_fraction": 0.6153846383094788,
"avg_line_length": 30.165563583374023,
"blob_id": "46ef8679b8ed582ca53c62835de826ef012484a4",
"content_id": "0b0a8cfe8b4f1318fbdadbc63fdc5dd09efa221b",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 4706,
"license_type": "no_license",
"max_line_length": 90,
"num_lines": 151,
"path": "/API/models/regression.py",
"repo_name": "phoenixx1/Market-Analysis",
"src_encoding": "UTF-8",
"text": "\nimport math\nimport matplotlib\nimport numpy as np\nimport pandas as pd\nimport seaborn as sns\nimport time\n\nfrom datetime import date, datetime, time, timedelta\nfrom matplotlib import pyplot as plt\nfrom pylab import rcParams\nfrom sklearn.linear_model import LinearRegression\nfrom sklearn.metrics import mean_squared_error\nfrom sklearn.metrics import r2_score\n\n\nfontsize = 14\nticklabelsize = 14\nN_opt = 5\ntest_size = 0.2 # proportion of dataset to be used as test set\ncv_size = 0.2 # proportion of dataset to be used as cross-validation set\nNmax = 30\n\ndef get_preds_lin_reg(df, target_col, N, pred_min, offset):\n regr = LinearRegression(fit_intercept=True)\n pred_list = []\n for i in range(offset, len(df['close'])):\n X_train = np.array(range(len(df['close'][i-N:i]))) \n y_train = np.array(df['close'][i-N:i])\n X_train = X_train.reshape(-1, 1)\n y_train = y_train.reshape(-1, 1)\n\n regr.fit(X_train, y_train)\n pred = regr.predict(np.array(N).reshape(1,-1))\n pred_list.append(pred[0][0]) \n \n pred_list = np.array(pred_list)\n pred_list[pred_list < pred_min] = pred_min\n \n return pred_list\n\ndef get_mape(y_true, y_pred): \n \"\"\"\n Compute mean absolute percentage error (MAPE)\n \"\"\"\n y_true, y_pred = np.array(y_true), np.array(y_pred)\n return np.mean(np.abs((y_true - y_pred) / y_true)) * 100\n\nstk_path = '../data/TATAMOTORS.csv'\n\ndf = pd.read_csv(stk_path, sep = \",\")\ndf['Date'] = df['Date'].apply(str)\n\nfor i in range(0, len(df['Date'])):\n df['Date'][i] = df['Date'][i][:4] + '-' + df['Date'][i][4:6] + '-' + df['Date'][i][6:]\n \ndf.loc[:, 'Date'] = pd.to_datetime(df['Date'],format='%Y-%m-%d')\ndf.columns = [str(x).lower().replace(' ', '_') for x in df.columns]\ndf['month'] = df['date'].dt.month\ndf.sort_values(by='date', inplace=True, ascending=True)\n\n# rcParams['figure.figsize'] = 10, 8 # width 10, height 8\n\n# ax = df.plot(x='date', y='close', style='b-', grid=True)\n# ax.set_xlabel(\"date\")\n# ax.set_ylabel(\"INR\")\n# plt.show()\n\n\n# Get sizes of each of the datasets\nnum_cv = int(cv_size*len(df))\nnum_test = int(test_size*len(df))\nnum_train = len(df) - num_cv - num_test\n# print(\"num_train = \" + str(num_train))\n# print(\"num_cv = \" + str(num_cv))\n# print(\"num_test = \" + str(num_test))\n\n# Split into train, cv, and test\ntrain = df[:num_train].copy()\ncv = df[num_train:num_train+num_cv].copy()\ntrain_cv = df[:num_train+num_cv].copy()\ntest = df[num_train+num_cv:].copy()\n# print(\"train.shape = \" + str(train.shape))\n# print(\"cv.shape = \" + str(cv.shape))\n# print(\"train_cv.shape = \" + str(train_cv.shape))\n# print(\"test.shape = \" + str(test.shape))\n\n# Plot adjusted close over time\nrcParams['figure.figsize'] = 10, 8 # width 10, height 8\n\n# ax = train.plot(x='date', y='close', style='b-', grid=True)\n# ax = cv.plot(x='date', y='close', style='y-', grid=True, ax=ax)\n# ax = test.plot(x='date', y='close', style='g-', grid=True, ax=ax)\n# ax.legend(['train', 'dev', 'test'])\n# ax.set_xlabel(\"date\")\n# ax.set_ylabel(\"USD\")\n# plt.show()\n\nRMSE = []\nR2 = []\nmape = []\nfor N in range(1, Nmax+1): # N is no. of samples to use to predict the next value\n est_list = get_preds_lin_reg(train_cv, 'close', N, 0, num_train)\n \n cv.loc[:, 'est' + '_N' + str(N)] = est_list\n RMSE.append(math.sqrt(mean_squared_error(est_list, cv['close'])))\n R2.append(r2_score(cv['close'], est_list))\n mape.append(get_mape(cv['close'], est_list))\n\n# print('RMSE = ' + str(RMSE))\n# print('R2 = ' + str(R2))\n# print('MAPE = ' + str(mape))\n\n# matplotlib.rcParams.update({'font.size': 14})\n# plt.figure(figsize=(12, 8), dpi=80)\n# plt.plot(range(1, Nmax+1), RMSE, 'x-')\n# plt.grid()\n# plt.xlabel('N')\n# plt.ylabel('RMSE')\n# plt.xlim([2, 30])\n# plt.show()\n\n# matplotlib.rcParams.update({'font.size': 14})\n# plt.figure(figsize=(12, 8), dpi=80)\n# plt.plot(range(1, Nmax+1), R2, 'x-')\n# plt.grid()\n# plt.xlabel('N')\n# plt.ylabel('R2')\n# plt.show()\n\n# plt.figure(figsize=(12, 8), dpi=80)\n# plt.plot(range(1, Nmax+1), mape, 'x-')\n# plt.grid()\n# plt.xlabel('N')\n# plt.ylabel('MAPE')\n# plt.show()\n\nest_list = get_preds_lin_reg(df, 'adj_close', N_opt, 0, num_train+num_cv)\ntest.loc[:, 'est' + '_N' + str(N_opt)] = est_list\n\n# Plot adjusted close over time, only for test set\nrcParams['figure.figsize'] = 10, 8 # width 10, height 8\nmatplotlib.rcParams.update({'font.size': 14})\n\nax = test.plot(x='date', y='close', style='gx-', grid=True)\nax = test.plot(x='date', y='est_N5', style='rx-', grid=True, ax=ax)\nax.legend(['test', 'predictions using linear regression'], loc='upper left')\nax.set_xlabel(\"date\")\nax.set_ylabel(\"INR\")\nax.set_xlim([date(2018, 4, 23), date(2018, 11, 23)])\nax.set_ylim([130, 155])\nplt.show()"
},
{
"alpha_fraction": 0.5288662314414978,
"alphanum_fraction": 0.5362856388092041,
"avg_line_length": 28.541095733642578,
"blob_id": "37dc4bb9d9a20dd7fa9642c41d38b9221cab2a86",
"content_id": "f16e1aff87d3e882ea0073e58a9ebc5d2e31eaf1",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "JavaScript",
"length_bytes": 4313,
"license_type": "no_license",
"max_line_length": 80,
"num_lines": 146,
"path": "/Frontend/analysis/src/components/Dashboard/prediction/LineChart.js",
"repo_name": "phoenixx1/Market-Analysis",
"src_encoding": "UTF-8",
"text": "import React from \"react\";\nimport PropTypes from \"prop-types\";\n\nimport { format } from \"d3-format\";\nimport { timeFormat } from \"d3-time-format\";\n\nimport { ChartCanvas, Chart } from \"react-stockcharts\";\nimport { LineSeries } from \"react-stockcharts/lib/series\";\nimport { XAxis, YAxis } from \"react-stockcharts/lib/axes\";\nimport {\n CrossHairCursor,\n MouseCoordinateX,\n MouseCoordinateY,\n} from \"react-stockcharts/lib/coordinates\";\n\nimport { discontinuousTimeScaleProvider } from \"react-stockcharts/lib/scale\";\nimport { OHLCTooltip } from \"react-stockcharts/lib/tooltip\";\nimport { fitWidth } from \"react-stockcharts/lib/helper\";\nimport { last } from \"react-stockcharts/lib/utils\";\nimport { MovingAverageTooltip } from \"react-stockcharts/lib/tooltip\";\nimport { sma } from \"react-stockcharts/lib/indicator\";\nimport { CurrentCoordinate } from \"react-stockcharts/lib/coordinates\";\n\nclass LineChart extends React.Component {\n render() {\n const { data: initialData, type, width, ratio } = this.props;\n const xScaleProvider = discontinuousTimeScaleProvider.inputDateAccessor(\n (d) => d.date\n );\n const { data, xScale, xAccessor, displayXAccessor } = xScaleProvider(\n initialData\n );\n const height = window.innerHeight - window.innerHeight * 0.5;\n const margin = { left: 40, right: 80, top: 20, bottom: 30 };\n const xExtents = [xAccessor(last(data)), xAccessor(data[data.length - 50])];\n // VWAP\n const vwap = sma()\n .merge((d, c) => {\n d.VWAP = c;\n })\n .accessor((d) => d.VWAP)\n .stroke(\"red\");\n\n // Forecast\n const forecast = sma()\n .merge((d, c) => {\n d.Forecast = c;\n })\n .accessor((d) => d.Forecast)\n .stroke(\"blue\");\n\n return (\n <ChartCanvas\n height={height}\n ratio={ratio}\n width={window.innerWidth - window.innerWidth * 0.58}\n margin={margin}\n type={type}\n pointsPerPxThreshold={1}\n data={data}\n xAccessor={xAccessor}\n displayXAccessor={displayXAccessor}\n xScale={xScale}\n xExtents={xExtents}\n >\n <Chart\n id={1}\n yExtents={[(d) => [d.VWAP, d.Forecast]]}\n padding={{ top: 45, bottom: 50 }}\n >\n <XAxis axisAt=\"bottom\" orient=\"bottom\" />\n <YAxis axisAt=\"right\" orient=\"right\" ticks={5} />\n <MouseCoordinateX\n at=\"bottom\"\n orient=\"bottom\"\n displayFormat={timeFormat(\"%Y-%m-%d\")}\n />\n <MouseCoordinateY\n at=\"right\"\n orient=\"right\"\n displayFormat={format(\".2f\")}\n />\n <LineSeries\n yAccessor={vwap.accessor()}\n stroke={vwap.stroke()}\n highlightOnHover\n />\n <CurrentCoordinate\n yAccessor={vwap.accessor()}\n fill={vwap.stroke()}\n />\n <MovingAverageTooltip\n onClick={(e) => console.log(e)}\n origin={[0, 0]}\n options={[\n {\n yAccessor: vwap.accessor(),\n type: \"VWAP\",\n windowSize: 0,\n stroke: vwap.stroke(),\n },\n ]}\n />\n\n <LineSeries\n yAccessor={forecast.accessor()}\n stroke={forecast.stroke()}\n highlightOnHover\n />\n <CurrentCoordinate\n yAccessor={forecast.accessor()}\n fill={forecast.stroke()}\n />\n <MovingAverageTooltip\n onClick={(e) => console.log(e)}\n origin={[60, 0]}\n options={[\n {\n yAccessor: forecast.accessor(),\n type: \"Forecast\",\n windowSize: 0,\n stroke: forecast.stroke(),\n },\n ]}\n />\n </Chart>\n\n <CrossHairCursor />\n </ChartCanvas>\n );\n }\n}\n\nLineChart.propTypes = {\n data: PropTypes.array.isRequired,\n width: PropTypes.number.isRequired,\n ratio: PropTypes.number.isRequired,\n type: PropTypes.oneOf([\"svg\", \"hybrid\"]).isRequired,\n};\n\nLineChart.defaultProps = {\n type: \"svg\",\n};\nLineChart = fitWidth(LineChart);\n\nexport default LineChart;\n"
},
{
"alpha_fraction": 0.8648648858070374,
"alphanum_fraction": 0.8648648858070374,
"avg_line_length": 37,
"blob_id": "7363586b737fa65ee3921fae388f0064b3362648",
"content_id": "11c6e181af49f95b06ec2f7fa14a1bfdfdeea69f",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 37,
"license_type": "no_license",
"max_line_length": 37,
"num_lines": 1,
"path": "/API/data_loading/__init__.py",
"repo_name": "phoenixx1/Market-Analysis",
"src_encoding": "UTF-8",
"text": "from .company_data import CompanyData"
},
{
"alpha_fraction": 0.3767806887626648,
"alphanum_fraction": 0.3991010785102844,
"avg_line_length": 26.12190055847168,
"blob_id": "52a0b377979582e0794acc30d5067a767b722f77",
"content_id": "c80a9b821c8ecb33f4364c8c111b6cd4eaf179da",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "JavaScript",
"length_bytes": 13127,
"license_type": "no_license",
"max_line_length": 86,
"num_lines": 484,
"path": "/Frontend/analysis/src/components/Dashboard/Charts/Indicators.js",
"repo_name": "phoenixx1/Market-Analysis",
"src_encoding": "UTF-8",
"text": "import React from \"react\";\nimport { LineSeries } from \"react-stockcharts/lib/series\";\nimport { CurrentCoordinate } from \"react-stockcharts/lib/coordinates\";\n\nimport { MovingAverageTooltip } from \"react-stockcharts/lib/tooltip\";\nimport { ema, wma, sma } from \"react-stockcharts/lib/indicator\";\nimport SARSeries from \"react-stockcharts/lib/series/SARSeries\";\nimport SingleValueTooltip from \"react-stockcharts/lib/tooltip/SingleValueTooltip\";\nimport bollingerBand from \"react-stockcharts/lib/indicator/bollingerBand\";\nimport BollingerSeries from \"react-stockcharts/lib/series/BollingerSeries\";\nimport BollingerBandTooltip from \"react-stockcharts/lib/tooltip/BollingerBandTooltip\";\n\nconst bbStroke = {\n top: \"#964B00\",\n middle: \"#000000\",\n bottom: \"#964B00\",\n};\n\nconst bbFill = \"#4682B4\";\n\nclass Indicators extends React.Component {\n render() {\n const { data: initialData, indicators } = this.props;\n // EMA\n const ema50 = ema()\n .options({ windowSize: 50 })\n .merge((d, c) => {\n d.EMA = c;\n })\n .accessor((d) => d.EMA)\n .stroke(\"blue\");\n\n // SMA\n const sma50 = sma()\n .options({ windowSize: 50 })\n .merge((d, c) => {\n d.SMA = c;\n })\n .accessor((d) => d.SMA)\n .stroke(\"red\");\n\n // DEMA\n const dema50 = sma()\n .options({ windowSize: 50 })\n .merge((d, c) => {\n d.DEMA = c;\n })\n .accessor((d) => d.DEMA)\n .stroke(\"green\");\n\n // KAMA\n const kama50 = sma()\n .options({ windowSize: 50 })\n .merge((d, c) => {\n d.KAMA = c;\n })\n .accessor((d) => d.KAMA)\n .stroke(\"yellow\");\n\n // HT_TRENDLINE\n const htline50 = sma()\n .options({ windowSize: 50 })\n .merge((d, c) => {\n d.HT_TRENDLINE = c;\n })\n .accessor((d) => d.HT_TRENDLINE)\n .stroke(\"black\");\n\n // T3\n const t350 = sma()\n .options({ windowSize: 50 })\n .merge((d, c) => {\n d.T3 = c;\n })\n .accessor((d) => d.T3)\n .stroke(\"brown\");\n\n // TEMA\n const tema50 = sma()\n .options({ windowSize: 50 })\n .merge((d, c) => {\n d.TEMA = c;\n })\n .accessor((d) => d.TEMA)\n .stroke(\"#78c4d4\");\n\n // TRIMA\n const trima50 = sma()\n .options({ windowSize: 50 })\n .merge((d, c) => {\n d.TRIMA = c;\n })\n .accessor((d) => d.TRIMA)\n .stroke(\"orange\");\n\n // WMA\n const wma50 = wma()\n .options({ windowSize: 50 })\n .merge((d, c) => {\n d.WMA = c;\n })\n .accessor((d) => d.WMA)\n .stroke(\"purple\");\n\n // MAMA\n const mama50 = sma()\n .options({ windowSize: 50 })\n .merge((d, c) => {\n d.MAMA = c;\n })\n .accessor((d) => d.MAMA)\n .stroke(\"grey\");\n\n // FAMA\n const fama50 = sma()\n .options({ windowSize: 50 })\n .merge((d, c) => {\n d.FAMA = c;\n })\n .accessor((d) => d.FAMA)\n .stroke(\"violet\");\n\n // SAR\n const accelerationFactor = 0.02;\n const maxAccelerationFactor = 0.2;\n\n // BBANDS\n const bb = bollingerBand()\n .merge((d, c) => {\n d.BB = c;\n })\n .accessor((d) => d.BB);\n\n return (\n <>\n {indicators[\"SMA\"] ? (\n <>\n <LineSeries\n yAccessor={sma50.accessor()}\n stroke={sma50.stroke()}\n highlightOnHover\n />\n <CurrentCoordinate\n yAccessor={sma50.accessor()}\n fill={sma50.stroke()}\n />\n <MovingAverageTooltip\n onClick={(e) => console.log(e)}\n origin={[0, 0]}\n options={[\n {\n yAccessor: sma50.accessor(),\n type: \"SMA\",\n stroke: sma50.stroke(),\n windowSize: sma50.options().windowSize,\n echo: \"some echo here\",\n },\n ]}\n />\n </>\n ) : (\n <></>\n )}\n {indicators[\"EMA\"] ? (\n <>\n <LineSeries\n yAccessor={ema50.accessor()}\n stroke={ema50.stroke()}\n highlightOnHover\n />\n <CurrentCoordinate\n yAccessor={ema50.accessor()}\n fill={ema50.stroke()}\n />\n <MovingAverageTooltip\n onClick={(e) => console.log(e)}\n origin={[60, 0]}\n options={[\n {\n yAccessor: ema50.accessor(),\n type: \"EMA\",\n stroke: ema50.stroke(),\n windowSize: ema50.options().windowSize,\n echo: \"some echo here\",\n },\n ]}\n />\n </>\n ) : (\n <></>\n )}\n {indicators[\"DEMA\"] ? (\n <>\n <LineSeries\n yAccessor={dema50.accessor()}\n stroke={dema50.stroke()}\n highlightOnHover\n />\n <CurrentCoordinate\n yAccessor={dema50.accessor()}\n fill={dema50.stroke()}\n />\n <MovingAverageTooltip\n onClick={(e) => console.log(e)}\n origin={[120, 0]}\n options={[\n {\n yAccessor: dema50.accessor(),\n type: \"DEMA\",\n stroke: dema50.stroke(),\n windowSize: dema50.options().windowSize,\n echo: \"some echo here\",\n },\n ]}\n />\n </>\n ) : (\n <></>\n )}\n {indicators[\"KAMA\"] ? (\n <>\n <LineSeries\n yAccessor={kama50.accessor()}\n stroke={kama50.stroke()}\n highlightOnHover\n />\n <CurrentCoordinate\n yAccessor={kama50.accessor()}\n fill={kama50.stroke()}\n />\n <MovingAverageTooltip\n onClick={(e) => console.log(e)}\n origin={[185, 0]}\n options={[\n {\n yAccessor: kama50.accessor(),\n type: \"KAMA\",\n stroke: kama50.stroke(),\n windowSize: kama50.options().windowSize,\n echo: \"some echo here\",\n },\n ]}\n />\n </>\n ) : (\n <></>\n )}\n {indicators[\"HT_TRENDLINE\"] ? (\n <>\n <LineSeries\n yAccessor={htline50.accessor()}\n stroke={htline50.stroke()}\n highlightOnHover\n />\n <CurrentCoordinate\n yAccessor={htline50.accessor()}\n fill={htline50.stroke()}\n />\n <MovingAverageTooltip\n onClick={(e) => console.log(e)}\n origin={[250, 0]}\n options={[\n {\n yAccessor: htline50.accessor(),\n type: \"HT_TRENDLINE\",\n stroke: htline50.stroke(),\n windowSize: htline50.options().windowSize,\n echo: \"some echo here\",\n },\n ]}\n />\n </>\n ) : (\n <></>\n )}\n {indicators[\"T3\"] ? (\n <>\n <LineSeries\n yAccessor={t350.accessor()}\n stroke={t350.stroke()}\n highlightOnHover\n />\n <CurrentCoordinate\n yAccessor={t350.accessor()}\n fill={t350.stroke()}\n />\n <MovingAverageTooltip\n onClick={(e) => console.log(e)}\n origin={[365, 0]}\n options={[\n {\n yAccessor: t350.accessor(),\n type: \"T3\",\n stroke: t350.stroke(),\n windowSize: t350.options().windowSize,\n echo: \"some echo here\",\n },\n ]}\n />\n </>\n ) : (\n <></>\n )}\n {indicators[\"TEMA\"] ? (\n <>\n <LineSeries\n yAccessor={tema50.accessor()}\n stroke={tema50.stroke()}\n highlightOnHover\n />\n <CurrentCoordinate\n yAccessor={tema50.accessor()}\n fill={tema50.stroke()}\n />\n <MovingAverageTooltip\n onClick={(e) => console.log(e)}\n origin={[415, 0]}\n options={[\n {\n yAccessor: tema50.accessor(),\n type: \"TEMA\",\n stroke: tema50.stroke(),\n windowSize: tema50.options().windowSize,\n echo: \"some echo here\",\n },\n ]}\n />\n </>\n ) : (\n <></>\n )}\n {indicators[\"TRIMA\"] ? (\n <>\n <LineSeries\n yAccessor={trima50.accessor()}\n stroke={trima50.stroke()}\n highlightOnHover\n />\n <CurrentCoordinate\n yAccessor={trima50.accessor()}\n fill={trima50.stroke()}\n />\n <MovingAverageTooltip\n onClick={(e) => console.log(e)}\n origin={[480, 0]}\n options={[\n {\n yAccessor: trima50.accessor(),\n type: \"TRIMA\",\n stroke: trima50.stroke(),\n windowSize: trima50.options().windowSize,\n echo: \"some echo here\",\n },\n ]}\n />\n </>\n ) : (\n <></>\n )}\n {indicators[\"WMA\"] ? (\n <>\n <LineSeries\n yAccessor={wma50.accessor()}\n stroke={wma50.stroke()}\n highlightOnHover\n />\n <CurrentCoordinate\n yAccessor={wma50.accessor()}\n fill={wma50.stroke()}\n />\n <MovingAverageTooltip\n onClick={(e) => console.log(e)}\n origin={[545, 0]}\n options={[\n {\n yAccessor: wma50.accessor(),\n type: \"WMA\",\n stroke: wma50.stroke(),\n windowSize: wma50.options().windowSize,\n echo: \"some echo here\",\n },\n ]}\n />\n </>\n ) : (\n <></>\n )}\n {indicators[\"MAMA\"] ? (\n <>\n <LineSeries\n yAccessor={mama50.accessor()}\n stroke={mama50.stroke()}\n highlightOnHover\n />\n <LineSeries\n yAccessor={fama50.accessor()}\n stroke={fama50.stroke()}\n highlightOnHover\n />\n <CurrentCoordinate\n yAccessor={mama50.accessor()}\n fill={mama50.stroke()}\n />\n <CurrentCoordinate\n yAccessor={fama50.accessor()}\n fill={fama50.stroke()}\n />\n <MovingAverageTooltip\n onClick={(e) => console.log(e)}\n origin={[605, 0]}\n options={[\n {\n yAccessor: mama50.accessor(),\n type: \"MAMA\",\n stroke: mama50.stroke(),\n windowSize: mama50.options().windowSize,\n echo: \"some echo here\",\n },\n ]}\n />\n <MovingAverageTooltip\n onClick={(e) => console.log(e)}\n origin={[670, 0]}\n options={[\n {\n yAccessor: fama50.accessor(),\n type: \"FAMA\",\n stroke: fama50.stroke(),\n windowSize: fama50.options().windowSize,\n echo: \"some echo here\",\n },\n ]}\n />\n </>\n ) : (\n <></>\n )}\n {indicators[\"SAR\"] ? (\n <>\n <SARSeries yAccessor={(d) => d.SAR} />\n <SingleValueTooltip\n yLabel={`SAR (${accelerationFactor}, ${maxAccelerationFactor})`}\n yAccessor={(d) => d.SAR}\n origin={[0, 40]}\n />\n </>\n ) : (\n <></>\n )}\n {indicators[\"SAREXT\"] ? (\n <>\n <SARSeries yAccessor={(d) => d.SAREXT} />\n <SingleValueTooltip\n yLabel={`SAREXT (${accelerationFactor}, ${maxAccelerationFactor})`}\n yAccessor={(d) => d.SAREXT}\n origin={[0, 55]}\n />\n </>\n ) : (\n <></>\n )}\n {indicators[\"BBANDS\"] ? (\n <>\n <BollingerSeries\n yAccessor={(d) => d.BB}\n stroke={bbStroke}\n fill={bbFill}\n />\n <BollingerBandTooltip\n origin={[130, 40]}\n yAccessor={(d) => d.BB}\n options={bb.options()}\n />\n </>\n ) : (\n <></>\n )}\n BBANDS\n </>\n );\n }\n}\n\nexport default Indicators;\n"
},
{
"alpha_fraction": 0.38523879647254944,
"alphanum_fraction": 0.40524351596832275,
"avg_line_length": 29.283214569091797,
"blob_id": "7269118ae2845de47aaf55e01f15c5483a66d006",
"content_id": "a3a8f9b056dfd276d027d3242a15354983392181",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "JavaScript",
"length_bytes": 76132,
"license_type": "no_license",
"max_line_length": 88,
"num_lines": 2514,
"path": "/Frontend/analysis/src/components/Dashboard/Charts/AreaChart.js",
"repo_name": "phoenixx1/Market-Analysis",
"src_encoding": "UTF-8",
"text": "import React from \"react\";\nimport PropTypes from \"prop-types\";\n\nimport { scaleTime } from \"d3-scale\";\nimport { curveMonotoneX } from \"d3-shape\";\n\nimport { format } from \"d3-format\";\nimport { timeFormat } from \"d3-time-format\";\n\nimport { ChartCanvas, Chart } from \"react-stockcharts\";\nimport { AreaSeries } from \"react-stockcharts/lib/series\";\nimport { XAxis, YAxis } from \"react-stockcharts/lib/axes\";\nimport { fitWidth } from \"react-stockcharts/lib/helper\";\nimport {\n createVerticalLinearGradient,\n hexToRGBA,\n} from \"react-stockcharts/lib/utils\";\nimport {\n CrossHairCursor,\n MouseCoordinateX,\n MouseCoordinateY,\n} from \"react-stockcharts/lib/coordinates\";\nimport { last } from \"react-stockcharts/lib/utils\";\nimport { OHLCTooltip } from \"react-stockcharts/lib/tooltip\";\nimport { ema, wma, sma, tma } from \"react-stockcharts/lib/indicator\";\nimport MovingAverageTooltip from \"react-stockcharts/lib/tooltip/MovingAverageTooltip\";\nimport CurrentCoordinate from \"react-stockcharts/lib/coordinates/CurrentCoordinate\";\nimport LineSeries from \"react-stockcharts/lib/series/LineSeries\";\nimport Indicators from \"./Indicators\";\nimport BarSeries from \"react-stockcharts/lib/series/BarSeries\";\nimport { Studies } from \"../studiesList\";\n\nimport PriceCoordinate from \"react-stockcharts/lib/coordinates/PriceCoordinate\";\nimport macd from \"react-stockcharts/lib/indicator/macd\";\nimport MACDSeries from \"react-stockcharts/lib/series/MACDSeries\";\nimport MACDTooltip from \"react-stockcharts/lib/tooltip/MACDTooltip\";\nimport SingleValueTooltip from \"react-stockcharts/lib/tooltip/SingleValueTooltip\";\nimport rsi from \"react-stockcharts/lib/indicator/rsi\";\nimport RSISeries from \"react-stockcharts/lib/series/RSISeries\";\nimport RSITooltip from \"react-stockcharts/lib/tooltip/RSITooltip\";\nimport stochasticOscillator from \"react-stockcharts/lib/indicator/stochasticOscillator\";\nimport StochasticSeries from \"react-stockcharts/lib/series/StochasticSeries\";\nimport StochasticTooltip from \"react-stockcharts/lib/tooltip/StochasticTooltip\";\nimport EdgeIndicator from \"react-stockcharts/lib/coordinates/EdgeIndicator\";\n\nconst canvasGradient = createVerticalLinearGradient([\n { stop: 0, color: hexToRGBA(\"#b5d0ff\", 0.2) },\n { stop: 0.7, color: hexToRGBA(\"#6fa4fc\", 0.4) },\n { stop: 1, color: hexToRGBA(\"#4286f4\", 0.8) },\n]);\n\nclass AreaChart extends React.Component {\n render() {\n const {\n data,\n type,\n width,\n ratio,\n MA,\n indicators,\n selectedStudy,\n } = this.props;\n const margin = { left: 80, right: 80, top: 30, bottom: 50 };\n const xAccessor = (d) => d.date;\n const height = window.innerHeight - 100;\n const xExtents = [xAccessor(last(data)), xAccessor(data[data.length - 50])];\n let flag = true;\n for (let i = 0; i < Studies.length; i++) {\n if (indicators[Studies[i]]) {\n flag = false;\n break;\n } else flag = true;\n }\n\n // const updateIndicators = (currentStudy) => {\n // for (let i = 0; i < Studies.length; i++) {\n // indicators[Studies[i]] = false;\n // }\n // indicators[currentStudy] = true;\n // };\n\n // MIDPRICE\n const midpr50 = sma()\n .options({ windowSize: 50 })\n .merge((d, c) => {\n d.MIDPRICE = c;\n })\n .accessor((d) => d.MIDPRICE)\n .stroke(\"blue\");\n\n // MIDPOINT\n const midpt50 = sma()\n .options({ windowSize: 50 })\n .merge((d, c) => {\n d.MIDPOINT = c;\n })\n .accessor((d) => d.MIDPOINT)\n .stroke(\"red\");\n\n // ADX\n const adx50 = sma()\n .options({ windowSize: 50 })\n .merge((d, c) => {\n d.ADX = c;\n })\n .accessor((d) => d.ADX)\n .stroke(\"blue\");\n\n // ADXR\n const adxr50 = sma()\n .options({ windowSize: 50 })\n .merge((d, c) => {\n d.ADXR = c;\n })\n .accessor((d) => d.ADXR)\n .stroke(\"red\");\n\n // APO\n const apo50 = sma()\n .options({ windowSize: 50 })\n .merge((d, c) => {\n d.APO = c;\n })\n .accessor((d) => d.APO)\n .stroke(\"red\");\n\n // AROON0\n const aroon050 = sma()\n .options({ windowSize: 50 })\n .merge((d, c) => {\n d.AROON0 = c;\n })\n .accessor((d) => d.AROON0)\n .stroke(\"red\");\n // AROON1\n const aroon150 = sma()\n .options({ windowSize: 50 })\n .merge((d, c) => {\n d.AROON1 = c;\n })\n .accessor((d) => d.AROON1)\n .stroke(\"blue\");\n\n // AROONOSC\n const aroonosc50 = sma()\n .options({ windowSize: 50 })\n .merge((d, c) => {\n d.AROONOSC = c;\n })\n .accessor((d) => d.AROONOSC)\n .stroke(\"blue\");\n\n // BOP\n const bop50 = sma()\n .options({ windowSize: 50 })\n .merge((d, c) => {\n d.BOP = c;\n })\n .accessor((d) => d.BOP)\n .stroke(\"red\");\n\n // CCI\n const cci50 = sma()\n .options({ windowSize: 50 })\n .merge((d, c) => {\n d.CCI = c;\n })\n .accessor((d) => d.CCI)\n .stroke(\"red\");\n\n // CMO\n const cmo50 = sma()\n .options({ windowSize: 50 })\n .merge((d, c) => {\n d.CMO = c;\n })\n .accessor((d) => d.CMO)\n .stroke(\"blue\");\n\n // DX\n const dx50 = sma()\n .options({ windowSize: 50 })\n .merge((d, c) => {\n d.DX = c;\n })\n .accessor((d) => d.DX)\n .stroke(\"blue\");\n\n //MACD\n const macdAppearance = {\n stroke: {\n macd: \"#FF0000\",\n signal: \"#00F300\",\n },\n fill: {\n divergence: \"#4682B4\",\n },\n };\n const macdCalculator = macd()\n .options({\n fast: 12,\n slow: 26,\n signal: 9,\n })\n .merge((d, c) => {\n d.MACD = c;\n })\n .accessor((d) => d.MACD);\n const macdextCalculator = macd()\n .options({\n fast: 12,\n slow: 26,\n signal: 9,\n })\n .merge((d, c) => {\n d.MACDEXT = c;\n })\n .accessor((d) => d.MACDEXT);\n const macdfixCalculator = macd()\n .options({\n fast: 12,\n slow: 26,\n signal: 9,\n })\n .merge((d, c) => {\n d.MACDFIX = c;\n })\n .accessor((d) => d.MACDFIX);\n\n // MFI\n const mfi50 = sma()\n .options({ windowSize: 50 })\n .merge((d, c) => {\n d.MFI = c;\n })\n .accessor((d) => d.MFI)\n .stroke(\"blue\");\n\n // MINUS_DI\n const minusdi50 = sma()\n .options({ windowSize: 50 })\n .merge((d, c) => {\n d.MINUS_DI = c;\n })\n .accessor((d) => d.MINUS_DI)\n .stroke(\"red\");\n\n // MINUS_DM\n const minusdm50 = sma()\n .options({ windowSize: 50 })\n .merge((d, c) => {\n d.MINUS_DM = c;\n })\n .accessor((d) => d.MINUS_DM)\n .stroke(\"blue\");\n\n // MOM\n const mom50 = sma()\n .options({ windowSize: 50 })\n .merge((d, c) => {\n d.MOM = c;\n })\n .accessor((d) => d.MOM)\n .stroke(\"red\");\n\n // PLUS_DI\n const plusdi50 = sma()\n .options({ windowSize: 50 })\n .merge((d, c) => {\n d.PLUS_DI = c;\n })\n .accessor((d) => d.PLUS_DI)\n .stroke(\"blue\");\n\n // PLUS_DM\n const plusdm50 = sma()\n .options({ windowSize: 50 })\n .merge((d, c) => {\n d.PLUS_DM = c;\n })\n .accessor((d) => d.PLUS_DM)\n .stroke(\"red\");\n\n // PPO\n const ppo50 = sma()\n .options({ windowSize: 50 })\n .merge((d, c) => {\n d.PPO = c;\n })\n .accessor((d) => d.PPO)\n .stroke(\"blue\");\n\n // ROC\n const roc50 = sma()\n .options({ windowSize: 50 })\n .merge((d, c) => {\n d.ROC = c;\n })\n .accessor((d) => d.ROC)\n .stroke(\"blue\");\n\n // ROCP\n const rocp50 = sma()\n .options({ windowSize: 50 })\n .merge((d, c) => {\n d.ROCP = c;\n })\n .accessor((d) => d.ROCP)\n .stroke(\"green\");\n\n // ROCR\n const rocr50 = sma()\n .options({ windowSize: 50 })\n .merge((d, c) => {\n d.ROCR = c;\n })\n .accessor((d) => d.ROCR)\n .stroke(\"red\");\n\n // ROCR100\n const rocr100 = sma()\n .options({ windowSize: 100 })\n .merge((d, c) => {\n d.ROCR100 = c;\n })\n .accessor((d) => d.ROCR100)\n .stroke(\"blue\");\n\n // RSI\n const rsiCalculator = rsi()\n .options({ windowSize: 14 })\n .merge((d, c) => {\n d.RSI = c;\n })\n .accessor((d) => d.RSI);\n\n // STOCH\n const stoAppearance = {\n stroke: Object.assign({}, StochasticSeries.defaultProps.stroke),\n };\n\n const stoch = stochasticOscillator()\n .options({ windowSize: 14, kWindowSize: 3 })\n .merge((d, c) => {\n d.STOCH = c;\n })\n .accessor((d) => d.STOCH);\n const stochf = stochasticOscillator()\n .options({ windowSize: 14, kWindowSize: 3 })\n .merge((d, c) => {\n d.STOCHF = c;\n })\n .accessor((d) => d.STOCHF);\n const stochrsi = stochasticOscillator()\n .options({ windowSize: 14, kWindowSize: 3 })\n .merge((d, c) => {\n d.STOCHRSI = c;\n })\n .accessor((d) => d.STOCHRSI);\n\n // TRIX\n const trix50 = sma()\n .options({ windowSize: 50 })\n .merge((d, c) => {\n d.TRIX = c;\n })\n .accessor((d) => d.TRIX)\n .stroke(\"blue\");\n\n // ULTOSC\n const ultosc50 = sma()\n .options({ windowSize: 50 })\n .merge((d, c) => {\n d.ULTOSC = c;\n })\n .accessor((d) => d.ULTOSC)\n .stroke(\"blue\");\n\n // WILLR\n const willr50 = sma()\n .options({ windowSize: 50 })\n .merge((d, c) => {\n d.WILLR = c;\n })\n .accessor((d) => d.WILLR)\n .stroke(\"red\");\n\n // AD\n const ad50 = sma()\n .options({ windowSize: 50 })\n .merge((d, c) => {\n d.AD = c;\n })\n .accessor((d) => d.AD)\n .stroke(\"red\");\n\n // ADOSC\n const adsoc50 = sma()\n .options({ windowSize: 50 })\n .merge((d, c) => {\n d.ADOSC = c;\n })\n .accessor((d) => d.ADOSC)\n .stroke(\"red\");\n\n // OBV\n const obv50 = sma()\n .options({ windowSize: 50 })\n .merge((d, c) => {\n d.OBV = c;\n })\n .accessor((d) => d.OBV)\n .stroke(\"red\");\n\n // ATR\n const atr50 = sma()\n .options({ windowSize: 50 })\n .merge((d, c) => {\n d.ATR = c;\n })\n .accessor((d) => d.ATR)\n .stroke(\"red\");\n\n // NATR\n const natr50 = sma()\n .options({ windowSize: 50 })\n .merge((d, c) => {\n d.NATR = c;\n })\n .accessor((d) => d.NATR)\n .stroke(\"blue\");\n\n // TRANGE\n const trange50 = sma()\n .options({ windowSize: 50 })\n .merge((d, c) => {\n d.TRANGE = c;\n })\n .accessor((d) => d.TRANGE)\n .stroke(\"red\");\n\n // AVGPRICE\n const avg50 = sma()\n .options({ windowSize: 50 })\n .merge((d, c) => {\n d.AVGPRICE = c;\n })\n .accessor((d) => d.AVGPRICE)\n .stroke(\"black\");\n\n // MEDPRICE\n const med50 = sma()\n .options({ windowSize: 50 })\n .merge((d, c) => {\n d.MEDPRICE = c;\n })\n .accessor((d) => d.MEDPRICE)\n .stroke(\"black\");\n\n // TYPPRICE\n const typ50 = sma()\n .options({ windowSize: 50 })\n .merge((d, c) => {\n d.TYPPRICE = c;\n })\n .accessor((d) => d.TYPPRICE)\n .stroke(\"black\");\n\n // WCLPRICE\n const wcl50 = sma()\n .options({ windowSize: 50 })\n .merge((d, c) => {\n d.WCLPRICE = c;\n })\n .accessor((d) => d.WCLPRICE)\n .stroke(\"black\");\n\n // HT_DCPERIOD\n const htdcpr50 = sma()\n .options({ windowSize: 50 })\n .merge((d, c) => {\n d.HT_DCPERIOD = c;\n })\n .accessor((d) => d.HT_DCPERIOD)\n .stroke(\"grey\");\n\n // HT_DCPHASE\n const htdcph50 = sma()\n .options({ windowSize: 50 })\n .merge((d, c) => {\n d.HT_DCPHASE = c;\n })\n .accessor((d) => d.HT_DCPHASE)\n .stroke(\"grey\");\n\n // HT_PHASOR0\n const htdcphsr050 = sma()\n .options({ windowSize: 50 })\n .merge((d, c) => {\n d.HT_PHASOR0 = c;\n })\n .accessor((d) => d.HT_PHASOR0)\n .stroke(\"blue\");\n\n // HT_PHASOR1\n const htdcphsr150 = sma()\n .options({ windowSize: 50 })\n .merge((d, c) => {\n d.HT_PHASOR1 = c;\n })\n .accessor((d) => d.HT_PHASOR1)\n .stroke(\"green\");\n\n // HT_SINE0\n const htsine050 = sma()\n .options({ windowSize: 50 })\n .merge((d, c) => {\n d.HT_SINE0 = c;\n })\n .accessor((d) => d.HT_SINE0)\n .stroke(\"blue\");\n\n // HT_SINE1\n const htsine150 = sma()\n .options({ windowSize: 50 })\n .merge((d, c) => {\n d.HT_SINE1 = c;\n })\n .accessor((d) => d.HT_SINE1)\n .stroke(\"green\");\n\n // HT_TRENDMODE\n const httrend50 = sma()\n .options({ windowSize: 50 })\n .merge((d, c) => {\n d.HT_TRENDMODE = c;\n })\n .accessor((d) => d.HT_TRENDMODE)\n .stroke(\"grey\");\n\n const priceChart = () => {\n switch (selectedStudy) {\n case \"MIDPRICE\":\n return (\n <Chart\n yExtents={[(d) => [d.MIDPRICE]]}\n height={height * 0.25}\n origin={(w, h) => [0, height * 0.65]}\n >\n <MouseCoordinateX\n at=\"bottom\"\n orient=\"bottom\"\n displayFormat={timeFormat(\"%Y-%m-%d\")}\n />\n <MouseCoordinateY\n at=\"right\"\n orient=\"right\"\n displayFormat={format(\".2f\")}\n />\n <XAxis axisAt=\"bottom\" orient=\"bottom\" />\n\n <YAxis axisAt=\"right\" orient=\"right\" ticks={3} />\n <SingleValueTooltip\n yAccessor={midpr50.accessor()}\n yLabel={`MIDPRICE (${midpr50.options().windowSize})`}\n yDisplayFormat={format(\".2f\")}\n origin={[-30, 0]}\n />\n <LineSeries\n yAccessor={midpr50.accessor()}\n stroke={midpr50.stroke()}\n />\n </Chart>\n );\n case \"MIDPOINT\":\n return (\n <Chart\n yExtents={[(d) => [d.MIDPOINT]]}\n height={height * 0.25}\n origin={(w, h) => [0, height * 0.65]}\n >\n <MouseCoordinateX\n at=\"bottom\"\n orient=\"bottom\"\n displayFormat={timeFormat(\"%Y-%m-%d\")}\n />\n <MouseCoordinateY\n at=\"right\"\n orient=\"right\"\n displayFormat={format(\".2f\")}\n />\n <XAxis axisAt=\"bottom\" orient=\"bottom\" />\n <SingleValueTooltip\n yAccessor={midpt50.accessor()}\n yLabel={`MIDPOINT (${midpt50.options().windowSize})`}\n yDisplayFormat={format(\".2f\")}\n origin={[-30, 0]}\n />\n <YAxis axisAt=\"right\" orient=\"right\" ticks={3} />\n <LineSeries\n yAccessor={midpt50.accessor()}\n stroke={midpt50.stroke()}\n />\n </Chart>\n );\n case \"ADX\":\n return (\n <Chart\n yExtents={[(d) => [d.ADX]]}\n height={height * 0.25}\n origin={(w, h) => [0, height * 0.65]}\n >\n <MouseCoordinateX\n at=\"bottom\"\n orient=\"bottom\"\n displayFormat={timeFormat(\"%Y-%m-%d\")}\n />\n <MouseCoordinateY\n at=\"right\"\n orient=\"right\"\n displayFormat={format(\".2f\")}\n />\n <XAxis axisAt=\"bottom\" orient=\"bottom\" />\n <SingleValueTooltip\n yAccessor={adx50.accessor()}\n yLabel={`ADX (${adx50.options().windowSize})`}\n yDisplayFormat={format(\".2f\")}\n origin={[-30, 0]}\n />\n <YAxis axisAt=\"right\" orient=\"right\" ticks={3} />\n <LineSeries\n yAccessor={adx50.accessor()}\n stroke={adx50.stroke()}\n />\n </Chart>\n );\n case \"ADXR\":\n return (\n <Chart\n yExtents={[(d) => [d.ADXR]]}\n height={height * 0.25}\n origin={(w, h) => [0, height * 0.65]}\n >\n <MouseCoordinateX\n at=\"bottom\"\n orient=\"bottom\"\n displayFormat={timeFormat(\"%Y-%m-%d\")}\n />\n <MouseCoordinateY\n at=\"right\"\n orient=\"right\"\n displayFormat={format(\".2f\")}\n />\n <XAxis axisAt=\"bottom\" orient=\"bottom\" />\n <SingleValueTooltip\n yAccessor={adxr50.accessor()}\n yLabel={`ADXR (${adxr50.options().windowSize})`}\n yDisplayFormat={format(\".2f\")}\n origin={[-30, 0]}\n />\n <YAxis axisAt=\"right\" orient=\"right\" ticks={3} />\n <LineSeries\n yAccessor={adxr50.accessor()}\n stroke={adxr50.stroke()}\n />\n </Chart>\n );\n case \"APO\":\n return (\n <Chart\n yExtents={[(d) => [d.APO]]}\n height={height * 0.25}\n origin={(w, h) => [0, height * 0.65]}\n >\n <MouseCoordinateX\n at=\"bottom\"\n orient=\"bottom\"\n displayFormat={timeFormat(\"%Y-%m-%d\")}\n />\n <MouseCoordinateY\n at=\"right\"\n orient=\"right\"\n displayFormat={format(\".2f\")}\n />\n <PriceCoordinate\n at=\"left\"\n orient=\"left\"\n price={0}\n displayFormat={format(\".2f\")}\n />\n <XAxis axisAt=\"bottom\" orient=\"bottom\" />\n <SingleValueTooltip\n yAccessor={apo50.accessor()}\n yLabel={`APO (${apo50.options().windowSize})`}\n yDisplayFormat={format(\".2f\")}\n origin={[-30, 0]}\n />\n <YAxis axisAt=\"right\" orient=\"right\" ticks={3} />\n <LineSeries\n yAccessor={apo50.accessor()}\n stroke={apo50.stroke()}\n />\n </Chart>\n );\n case \"AROON\":\n return (\n <Chart\n yExtents={[(d) => [d.AROON0, d.AROON1]]}\n height={height * 0.25}\n origin={(w, h) => [0, height * 0.65]}\n >\n <MouseCoordinateX\n at=\"bottom\"\n orient=\"bottom\"\n displayFormat={timeFormat(\"%Y-%m-%d\")}\n />\n <MouseCoordinateY\n at=\"right\"\n orient=\"right\"\n displayFormat={format(\".2f\")}\n />\n\n <XAxis axisAt=\"bottom\" orient=\"bottom\" />\n\n <YAxis axisAt=\"right\" orient=\"right\" ticks={3} />\n <SingleValueTooltip\n yAccessor={aroon050.accessor()}\n yLabel={`AROON0 (${aroon050.options().windowSize})`}\n yDisplayFormat={format(\".2f\")}\n origin={[-30, 0]}\n />\n <SingleValueTooltip\n yAccessor={aroon150.accessor()}\n yLabel={`AROON1 (${aroon150.options().windowSize})`}\n yDisplayFormat={format(\".2f\")}\n origin={[80, 0]}\n />\n <LineSeries\n yAccessor={aroon050.accessor()}\n stroke={aroon050.stroke()}\n />\n <LineSeries\n yAccessor={aroon150.accessor()}\n stroke={aroon150.stroke()}\n />\n </Chart>\n );\n case \"AROONOSC\":\n return (\n <Chart\n yExtents={[(d) => [d.AROONOSC]]}\n height={height * 0.25}\n origin={(w, h) => [0, height * 0.65]}\n >\n <MouseCoordinateX\n at=\"bottom\"\n orient=\"bottom\"\n displayFormat={timeFormat(\"%Y-%m-%d\")}\n />\n <MouseCoordinateY\n at=\"right\"\n orient=\"right\"\n displayFormat={format(\".2f\")}\n />\n\n <XAxis axisAt=\"bottom\" orient=\"bottom\" />\n\n <SingleValueTooltip\n yAccessor={aroonosc50.accessor()}\n yLabel={`AROONOSC (${aroonosc50.options().windowSize})`}\n yDisplayFormat={format(\".2f\")}\n origin={[-30, 0]}\n />\n\n <YAxis axisAt=\"right\" orient=\"right\" ticks={3} />\n <LineSeries\n yAccessor={aroonosc50.accessor()}\n stroke={aroonosc50.stroke()}\n />\n </Chart>\n );\n case \"BOP\":\n return (\n <Chart\n yExtents={[(d) => [d.BOP]]}\n height={height * 0.25}\n origin={(w, h) => [0, height * 0.65]}\n >\n <MouseCoordinateX\n at=\"bottom\"\n orient=\"bottom\"\n displayFormat={timeFormat(\"%Y-%m-%d\")}\n />\n <MouseCoordinateY\n at=\"right\"\n orient=\"right\"\n displayFormat={format(\".2f\")}\n />\n <PriceCoordinate\n at=\"left\"\n orient=\"left\"\n price={0}\n displayFormat={format(\".2f\")}\n />\n <XAxis axisAt=\"bottom\" orient=\"bottom\" />\n <SingleValueTooltip\n yAccessor={bop50.accessor()}\n yLabel={`BOP (${bop50.options().windowSize})`}\n yDisplayFormat={format(\".2f\")}\n origin={[-30, 0]}\n />\n <YAxis axisAt=\"right\" orient=\"right\" ticks={3} />\n <LineSeries\n yAccessor={bop50.accessor()}\n stroke={bop50.stroke()}\n />\n </Chart>\n );\n case \"CCI\":\n return (\n <Chart\n yExtents={[(d) => [d.CCI]]}\n height={height * 0.25}\n origin={(w, h) => [0, height * 0.65]}\n >\n <MouseCoordinateX\n at=\"bottom\"\n orient=\"bottom\"\n displayFormat={timeFormat(\"%Y-%m-%d\")}\n />\n <MouseCoordinateY\n at=\"right\"\n orient=\"right\"\n displayFormat={format(\".2f\")}\n />\n <PriceCoordinate\n at=\"left\"\n orient=\"left\"\n price={0}\n displayFormat={format(\".2f\")}\n />\n <SingleValueTooltip\n yAccessor={cci50.accessor()}\n yLabel={`CCI (${cci50.options().windowSize})`}\n yDisplayFormat={format(\".2f\")}\n origin={[-30, 0]}\n />\n <XAxis axisAt=\"bottom\" orient=\"bottom\" />\n\n <YAxis axisAt=\"right\" orient=\"right\" ticks={3} />\n <LineSeries\n yAccessor={cci50.accessor()}\n stroke={cci50.stroke()}\n />\n </Chart>\n );\n case \"CMO\":\n return (\n <Chart\n yExtents={[(d) => [d.CMO]]}\n height={height * 0.25}\n origin={(w, h) => [0, height * 0.65]}\n >\n <MouseCoordinateX\n at=\"bottom\"\n orient=\"bottom\"\n displayFormat={timeFormat(\"%Y-%m-%d\")}\n />\n <MouseCoordinateY\n at=\"right\"\n orient=\"right\"\n displayFormat={format(\".2f\")}\n />\n <PriceCoordinate\n at=\"left\"\n orient=\"left\"\n price={0}\n displayFormat={format(\".2f\")}\n />\n <XAxis axisAt=\"bottom\" orient=\"bottom\" />\n <SingleValueTooltip\n yAccessor={cmo50.accessor()}\n yLabel={`CMO (${cmo50.options().windowSize})`}\n yDisplayFormat={format(\".2f\")}\n origin={[-30, 0]}\n />\n <YAxis axisAt=\"right\" orient=\"right\" ticks={3} />\n <LineSeries\n yAccessor={cmo50.accessor()}\n stroke={cmo50.stroke()}\n />\n </Chart>\n );\n case \"DX\":\n return (\n <Chart\n yExtents={[(d) => [d.DX]]}\n height={height * 0.25}\n origin={(w, h) => [0, height * 0.65]}\n >\n <MouseCoordinateX\n at=\"bottom\"\n orient=\"bottom\"\n displayFormat={timeFormat(\"%Y-%m-%d\")}\n />\n <MouseCoordinateY\n at=\"right\"\n orient=\"right\"\n displayFormat={format(\".2f\")}\n />\n <PriceCoordinate\n at=\"left\"\n orient=\"left\"\n price={0}\n displayFormat={format(\".2f\")}\n />\n <XAxis axisAt=\"bottom\" orient=\"bottom\" />\n <SingleValueTooltip\n yAccessor={dx50.accessor()}\n yLabel={`DX (${dx50.options().windowSize})`}\n yDisplayFormat={format(\".2f\")}\n origin={[-30, 0]}\n />\n <YAxis axisAt=\"right\" orient=\"right\" ticks={3} />\n <LineSeries yAccessor={dx50.accessor()} stroke={dx50.stroke()} />\n </Chart>\n );\n case \"MACD\":\n return (\n <Chart\n yExtents={macdCalculator.accessor()}\n height={height * 0.25}\n origin={(w, h) => [0, height * 0.65]}\n >\n <MouseCoordinateX\n at=\"bottom\"\n orient=\"bottom\"\n displayFormat={timeFormat(\"%Y-%m-%d\")}\n />\n <MouseCoordinateY\n at=\"right\"\n orient=\"right\"\n displayFormat={format(\".2f\")}\n />\n <PriceCoordinate\n at=\"left\"\n orient=\"left\"\n price={0}\n displayFormat={format(\".2f\")}\n />\n <XAxis axisAt=\"bottom\" orient=\"bottom\" />\n\n <YAxis axisAt=\"right\" orient=\"right\" ticks={3} />\n\n <MACDSeries yAccessor={(d) => d.MACD} {...macdAppearance} />\n <MACDTooltip\n origin={[-38, 15]}\n yAccessor={(d) => d.MACD}\n options={macdCalculator.options()}\n appearance={macdAppearance}\n />\n </Chart>\n );\n case \"MACDEXT\":\n return (\n <Chart\n yExtents={macdextCalculator.accessor()}\n height={height * 0.25}\n origin={(w, h) => [0, height * 0.65]}\n >\n <MouseCoordinateX\n at=\"bottom\"\n orient=\"bottom\"\n displayFormat={timeFormat(\"%Y-%m-%d\")}\n />\n <MouseCoordinateY\n at=\"right\"\n orient=\"right\"\n displayFormat={format(\".2f\")}\n />\n <PriceCoordinate\n at=\"left\"\n orient=\"left\"\n price={0}\n displayFormat={format(\".2f\")}\n />\n <XAxis axisAt=\"bottom\" orient=\"bottom\" />\n\n <YAxis axisAt=\"right\" orient=\"right\" ticks={3} />\n\n <MACDSeries yAccessor={(d) => d.MACDEXT} {...macdAppearance} />\n <MACDTooltip\n origin={[-38, 15]}\n yAccessor={(d) => d.MACDEXT}\n options={macdextCalculator.options()}\n appearance={macdAppearance}\n />\n </Chart>\n );\n case \"MACDFIX\":\n return (\n <Chart\n yExtents={macdfixCalculator.accessor()}\n height={height * 0.25}\n origin={(w, h) => [0, height * 0.65]}\n >\n <MouseCoordinateX\n at=\"bottom\"\n orient=\"bottom\"\n displayFormat={timeFormat(\"%Y-%m-%d\")}\n />\n <MouseCoordinateY\n at=\"right\"\n orient=\"right\"\n displayFormat={format(\".2f\")}\n />\n <PriceCoordinate\n at=\"left\"\n orient=\"left\"\n price={0}\n displayFormat={format(\".2f\")}\n />\n <XAxis axisAt=\"bottom\" orient=\"bottom\" />\n\n <YAxis axisAt=\"right\" orient=\"right\" ticks={3} />\n\n <MACDSeries yAccessor={(d) => d.MACDFIX} {...macdAppearance} />\n <MACDTooltip\n origin={[-38, 15]}\n yAccessor={(d) => d.MACDFIX}\n options={macdfixCalculator.options()}\n appearance={macdAppearance}\n />\n </Chart>\n );\n case \"MFI\":\n return (\n <Chart\n yExtents={[(d) => [d.MFI]]}\n height={height * 0.25}\n origin={(w, h) => [0, height * 0.65]}\n >\n <MouseCoordinateX\n at=\"bottom\"\n orient=\"bottom\"\n displayFormat={timeFormat(\"%Y-%m-%d\")}\n />\n <MouseCoordinateY\n at=\"right\"\n orient=\"right\"\n displayFormat={format(\".2f\")}\n />\n <PriceCoordinate\n at=\"left\"\n orient=\"left\"\n price={0}\n displayFormat={format(\".2f\")}\n />\n <XAxis axisAt=\"bottom\" orient=\"bottom\" />\n <SingleValueTooltip\n yAccessor={mfi50.accessor()}\n yLabel={`MFI (${mfi50.options().windowSize})`}\n yDisplayFormat={format(\".2f\")}\n origin={[-30, 0]}\n />\n <YAxis axisAt=\"right\" orient=\"right\" ticks={3} />\n <LineSeries\n yAccessor={mfi50.accessor()}\n stroke={mfi50.stroke()}\n />\n </Chart>\n );\n case \"MINUS_DI\":\n return (\n <Chart\n yExtents={[(d) => [d.MINUS_DI]]}\n height={height * 0.25}\n origin={(w, h) => [0, height * 0.65]}\n >\n <MouseCoordinateX\n at=\"bottom\"\n orient=\"bottom\"\n displayFormat={timeFormat(\"%Y-%m-%d\")}\n />\n <MouseCoordinateY\n at=\"right\"\n orient=\"right\"\n displayFormat={format(\".2f\")}\n />\n <PriceCoordinate\n at=\"left\"\n orient=\"left\"\n price={0}\n displayFormat={format(\".2f\")}\n />\n <XAxis axisAt=\"bottom\" orient=\"bottom\" />\n <SingleValueTooltip\n yAccessor={minusdi50.accessor()}\n yLabel={`MINUS_DI (${minusdi50.options().windowSize})`}\n yDisplayFormat={format(\".2f\")}\n origin={[-30, 0]}\n />\n <YAxis axisAt=\"right\" orient=\"right\" ticks={3} />\n <LineSeries\n yAccessor={minusdi50.accessor()}\n stroke={minusdi50.stroke()}\n />\n </Chart>\n );\n case \"MINUS_DM\":\n return (\n <Chart\n yExtents={[(d) => [d.MINUS_DM]]}\n height={height * 0.25}\n origin={(w, h) => [0, height * 0.65]}\n >\n <MouseCoordinateX\n at=\"bottom\"\n orient=\"bottom\"\n displayFormat={timeFormat(\"%Y-%m-%d\")}\n />\n <MouseCoordinateY\n at=\"right\"\n orient=\"right\"\n displayFormat={format(\".2f\")}\n />\n <PriceCoordinate\n at=\"left\"\n orient=\"left\"\n price={0}\n displayFormat={format(\".2f\")}\n />\n <XAxis axisAt=\"bottom\" orient=\"bottom\" />\n <SingleValueTooltip\n yAccessor={minusdm50.accessor()}\n yLabel={`MINUS_DM (${minusdm50.options().windowSize})`}\n yDisplayFormat={format(\".2f\")}\n origin={[-30, 0]}\n />\n <YAxis axisAt=\"right\" orient=\"right\" ticks={3} />\n <LineSeries\n yAccessor={minusdm50.accessor()}\n stroke={minusdm50.stroke()}\n />\n </Chart>\n );\n case \"MOM\":\n return (\n <Chart\n yExtents={[(d) => [d.MOM]]}\n height={height * 0.25}\n origin={(w, h) => [0, height * 0.65]}\n >\n <MouseCoordinateX\n at=\"bottom\"\n orient=\"bottom\"\n displayFormat={timeFormat(\"%Y-%m-%d\")}\n />\n <MouseCoordinateY\n at=\"right\"\n orient=\"right\"\n displayFormat={format(\".2f\")}\n />\n <PriceCoordinate\n at=\"left\"\n orient=\"left\"\n price={0}\n displayFormat={format(\".2f\")}\n />\n <XAxis axisAt=\"bottom\" orient=\"bottom\" />\n <SingleValueTooltip\n yAccessor={mom50.accessor()}\n yLabel={`MOM (${mom50.options().windowSize})`}\n yDisplayFormat={format(\".2f\")}\n origin={[-30, 0]}\n />\n <YAxis axisAt=\"right\" orient=\"right\" ticks={3} />\n <LineSeries\n yAccessor={mom50.accessor()}\n stroke={mom50.stroke()}\n />\n </Chart>\n );\n case \"PLUS_DI\":\n return (\n <Chart\n yExtents={[(d) => [d.PLUS_DI]]}\n height={height * 0.25}\n origin={(w, h) => [0, height * 0.65]}\n >\n <MouseCoordinateX\n at=\"bottom\"\n orient=\"bottom\"\n displayFormat={timeFormat(\"%Y-%m-%d\")}\n />\n <MouseCoordinateY\n at=\"right\"\n orient=\"right\"\n displayFormat={format(\".2f\")}\n />\n <PriceCoordinate\n at=\"left\"\n orient=\"left\"\n price={0}\n displayFormat={format(\".2f\")}\n />\n <XAxis axisAt=\"bottom\" orient=\"bottom\" />\n <SingleValueTooltip\n yAccessor={plusdi50.accessor()}\n yLabel={`PLUS_DI (${plusdi50.options().windowSize})`}\n yDisplayFormat={format(\".2f\")}\n origin={[-30, 0]}\n />\n <YAxis axisAt=\"right\" orient=\"right\" ticks={3} />\n\n <LineSeries\n yAccessor={plusdi50.accessor()}\n stroke={plusdi50.stroke()}\n />\n </Chart>\n );\n case \"PLUS_DM\":\n return (\n <Chart\n yExtents={[(d) => [d.PLUS_DM]]}\n height={height * 0.25}\n origin={(w, h) => [0, height * 0.65]}\n >\n <MouseCoordinateX\n at=\"bottom\"\n orient=\"bottom\"\n displayFormat={timeFormat(\"%Y-%m-%d\")}\n />\n <MouseCoordinateY\n at=\"right\"\n orient=\"right\"\n displayFormat={format(\".2f\")}\n />\n <PriceCoordinate\n at=\"left\"\n orient=\"left\"\n price={0}\n displayFormat={format(\".2f\")}\n />\n <XAxis axisAt=\"bottom\" orient=\"bottom\" />\n\n <SingleValueTooltip\n yAccessor={plusdm50.accessor()}\n yLabel={`PLUS_DM (${plusdm50.options().windowSize})`}\n yDisplayFormat={format(\".2f\")}\n origin={[-30, 0]}\n />\n\n <YAxis axisAt=\"right\" orient=\"right\" ticks={3} />\n <LineSeries\n yAccessor={plusdm50.accessor()}\n stroke={plusdm50.stroke()}\n />\n </Chart>\n );\n case \"PPO\":\n return (\n <Chart\n yExtents={[(d) => [d.PPO]]}\n height={height * 0.25}\n origin={(w, h) => [0, height * 0.65]}\n >\n <MouseCoordinateX\n at=\"bottom\"\n orient=\"bottom\"\n displayFormat={timeFormat(\"%Y-%m-%d\")}\n />\n <MouseCoordinateY\n at=\"right\"\n orient=\"right\"\n displayFormat={format(\".2f\")}\n />\n <PriceCoordinate\n at=\"left\"\n orient=\"left\"\n price={0}\n displayFormat={format(\".2f\")}\n />\n <XAxis axisAt=\"bottom\" orient=\"bottom\" />\n\n <SingleValueTooltip\n yAccessor={ppo50.accessor()}\n yLabel={`PPO (${ppo50.options().windowSize})`}\n yDisplayFormat={format(\".2f\")}\n origin={[-30, 0]}\n />\n\n <YAxis axisAt=\"right\" orient=\"right\" ticks={3} />\n <LineSeries\n yAccessor={ppo50.accessor()}\n stroke={ppo50.stroke()}\n />\n </Chart>\n );\n case \"ROC\":\n return (\n <Chart\n yExtents={[(d) => [d.ROC]]}\n height={height * 0.25}\n origin={(w, h) => [0, height * 0.65]}\n >\n <MouseCoordinateX\n at=\"bottom\"\n orient=\"bottom\"\n displayFormat={timeFormat(\"%Y-%m-%d\")}\n />\n <MouseCoordinateY\n at=\"right\"\n orient=\"right\"\n displayFormat={format(\".2f\")}\n />\n <PriceCoordinate\n at=\"left\"\n orient=\"left\"\n price={0}\n displayFormat={format(\".2f\")}\n />\n <XAxis axisAt=\"bottom\" orient=\"bottom\" />\n\n <SingleValueTooltip\n yAccessor={roc50.accessor()}\n yLabel={`ROC (${roc50.options().windowSize})`}\n yDisplayFormat={format(\".2f\")}\n origin={[-30, 0]}\n />\n\n <YAxis axisAt=\"right\" orient=\"right\" ticks={3} />\n <LineSeries\n yAccessor={roc50.accessor()}\n stroke={roc50.stroke()}\n />\n </Chart>\n );\n case \"ROCP\":\n return (\n <Chart\n yExtents={[(d) => [d.ROCP]]}\n height={height * 0.25}\n origin={(w, h) => [0, height * 0.65]}\n >\n <MouseCoordinateX\n at=\"bottom\"\n orient=\"bottom\"\n displayFormat={timeFormat(\"%Y-%m-%d\")}\n />\n <MouseCoordinateY\n at=\"right\"\n orient=\"right\"\n displayFormat={format(\".2f\")}\n />\n <PriceCoordinate\n at=\"left\"\n orient=\"left\"\n price={0}\n displayFormat={format(\".2f\")}\n />\n <XAxis axisAt=\"bottom\" orient=\"bottom\" />\n\n <SingleValueTooltip\n yAccessor={rocp50.accessor()}\n yLabel={`ROCP (${rocp50.options().windowSize})`}\n yDisplayFormat={format(\".2f\")}\n origin={[-30, 0]}\n />\n\n <YAxis axisAt=\"right\" orient=\"right\" ticks={3} />\n <LineSeries\n yAccessor={rocp50.accessor()}\n stroke={rocp50.stroke()}\n />\n </Chart>\n );\n case \"ROCR\":\n return (\n <Chart\n yExtents={[(d) => [d.ROCR]]}\n height={height * 0.25}\n origin={(w, h) => [0, height * 0.65]}\n >\n <MouseCoordinateX\n at=\"bottom\"\n orient=\"bottom\"\n displayFormat={timeFormat(\"%Y-%m-%d\")}\n />\n <MouseCoordinateY\n at=\"right\"\n orient=\"right\"\n displayFormat={format(\".2f\")}\n />\n <PriceCoordinate\n at=\"left\"\n orient=\"left\"\n price={0}\n displayFormat={format(\".2f\")}\n />\n <XAxis axisAt=\"bottom\" orient=\"bottom\" />\n\n <SingleValueTooltip\n yAccessor={rocr50.accessor()}\n yLabel={`ROCR (${rocr50.options().windowSize})`}\n yDisplayFormat={format(\".2f\")}\n origin={[-30, 0]}\n />\n\n <YAxis axisAt=\"right\" orient=\"right\" ticks={3} />\n <LineSeries\n yAccessor={rocr50.accessor()}\n stroke={rocr50.stroke()}\n />\n </Chart>\n );\n case \"ROCR100\":\n return (\n <Chart\n yExtents={[(d) => [d.ROCR100]]}\n height={height * 0.25}\n origin={(w, h) => [0, height * 0.65]}\n >\n <MouseCoordinateX\n at=\"bottom\"\n orient=\"bottom\"\n displayFormat={timeFormat(\"%Y-%m-%d\")}\n />\n <MouseCoordinateY\n at=\"right\"\n orient=\"right\"\n displayFormat={format(\".2f\")}\n />\n <PriceCoordinate\n at=\"left\"\n orient=\"left\"\n price={0}\n displayFormat={format(\".2f\")}\n />\n <XAxis axisAt=\"bottom\" orient=\"bottom\" />\n\n <SingleValueTooltip\n yAccessor={rocr100.accessor()}\n yLabel={`ROCR (${rocr100.options().windowSize})`}\n yDisplayFormat={format(\".2f\")}\n origin={[-30, 0]}\n />\n\n <YAxis axisAt=\"right\" orient=\"right\" ticks={3} />\n <LineSeries\n yAccessor={rocr100.accessor()}\n stroke={rocr100.stroke()}\n />\n </Chart>\n );\n case \"RSI\":\n return (\n <Chart\n yExtents={[(d) => [d.RSI]]}\n height={height * 0.25}\n origin={(w, h) => [0, height * 0.65]}\n >\n <MouseCoordinateX\n at=\"bottom\"\n orient=\"bottom\"\n displayFormat={timeFormat(\"%Y-%m-%d\")}\n />\n <MouseCoordinateY\n at=\"right\"\n orient=\"right\"\n displayFormat={format(\".2f\")}\n />\n <PriceCoordinate\n at=\"left\"\n orient=\"left\"\n price={0}\n displayFormat={format(\".2f\")}\n />\n <XAxis axisAt=\"bottom\" orient=\"bottom\" />\n\n <RSISeries yAccessor={(d) => d.RSI} />\n\n <RSITooltip\n origin={[-30, 0]}\n yAccessor={(d) => d.RSI}\n options={rsiCalculator.options()}\n />\n\n <YAxis axisAt=\"right\" orient=\"right\" ticks={3} />\n </Chart>\n );\n case \"STOCH\":\n return (\n <Chart\n yExtents={[0, 100]}\n height={height * 0.25}\n origin={(w, h) => [0, height * 0.65]}\n >\n <MouseCoordinateX\n at=\"bottom\"\n orient=\"bottom\"\n displayFormat={timeFormat(\"%Y-%m-%d\")}\n />\n <MouseCoordinateY\n at=\"right\"\n orient=\"right\"\n displayFormat={format(\".2f\")}\n />\n <PriceCoordinate\n at=\"left\"\n orient=\"left\"\n price={0}\n displayFormat={format(\".2f\")}\n />\n <XAxis axisAt=\"bottom\" orient=\"bottom\" />\n\n <StochasticSeries yAccessor={(d) => d.STOCH} {...stoAppearance} />\n\n <StochasticTooltip\n origin={[-30, 0]}\n yAccessor={(d) => d.STOCH}\n options={stoch.options()}\n appearance={stoAppearance}\n label=\"STOCH\"\n />\n\n <YAxis axisAt=\"right\" orient=\"right\" ticks={3} />\n </Chart>\n );\n case \"STOCHF\":\n return (\n <Chart\n yExtents={[0, 100]}\n height={height * 0.25}\n origin={(w, h) => [0, height * 0.65]}\n >\n <MouseCoordinateX\n at=\"bottom\"\n orient=\"bottom\"\n displayFormat={timeFormat(\"%Y-%m-%d\")}\n />\n <MouseCoordinateY\n at=\"right\"\n orient=\"right\"\n displayFormat={format(\".2f\")}\n />\n <PriceCoordinate\n at=\"left\"\n orient=\"left\"\n price={0}\n displayFormat={format(\".2f\")}\n />\n <XAxis axisAt=\"bottom\" orient=\"bottom\" />\n\n <StochasticSeries\n yAccessor={(d) => d.STOCHF}\n {...stoAppearance}\n />\n\n <StochasticTooltip\n origin={[-30, 0]}\n yAccessor={(d) => d.STOCHF}\n options={stochf.options()}\n appearance={stoAppearance}\n label=\"STOCHF\"\n />\n\n <YAxis axisAt=\"right\" orient=\"right\" ticks={3} />\n </Chart>\n );\n case \"STOCHRSI\":\n return (\n <Chart\n yExtents={[0, 100]}\n height={height * 0.25}\n origin={(w, h) => [0, height * 0.65]}\n >\n <MouseCoordinateX\n at=\"bottom\"\n orient=\"bottom\"\n displayFormat={timeFormat(\"%Y-%m-%d\")}\n />\n <MouseCoordinateY\n at=\"right\"\n orient=\"right\"\n displayFormat={format(\".2f\")}\n />\n <PriceCoordinate\n at=\"left\"\n orient=\"left\"\n price={0}\n displayFormat={format(\".2f\")}\n />\n <XAxis axisAt=\"bottom\" orient=\"bottom\" />\n\n <StochasticSeries\n yAccessor={(d) => d.STOCHRSI}\n {...stoAppearance}\n />\n\n <StochasticTooltip\n origin={[-30, 0]}\n yAccessor={(d) => d.STOCHRSI}\n options={stochrsi.options()}\n appearance={stoAppearance}\n label=\"STOCHRSI\"\n />\n\n <YAxis axisAt=\"right\" orient=\"right\" ticks={3} />\n </Chart>\n );\n case \"TRIX\":\n return (\n <Chart\n yExtents={[(d) => [d.TRIX]]}\n height={height * 0.25}\n origin={(w, h) => [0, height * 0.65]}\n >\n <MouseCoordinateX\n at=\"bottom\"\n orient=\"bottom\"\n displayFormat={timeFormat(\"%Y-%m-%d\")}\n />\n <MouseCoordinateY\n at=\"right\"\n orient=\"right\"\n displayFormat={format(\".2f\")}\n />\n <PriceCoordinate\n at=\"left\"\n orient=\"left\"\n price={0}\n displayFormat={format(\".2f\")}\n />\n <XAxis axisAt=\"bottom\" orient=\"bottom\" />\n\n <SingleValueTooltip\n yAccessor={trix50.accessor()}\n yLabel={`TRIX (${trix50.options().windowSize})`}\n yDisplayFormat={format(\".2f\")}\n origin={[-30, 0]}\n />\n\n <YAxis axisAt=\"right\" orient=\"right\" ticks={3} />\n <LineSeries\n yAccessor={trix50.accessor()}\n stroke={trix50.stroke()}\n />\n </Chart>\n );\n case \"ULTOSC\":\n return (\n <Chart\n yExtents={[(d) => [d.ULTOSC]]}\n height={height * 0.25}\n origin={(w, h) => [0, height * 0.65]}\n >\n <MouseCoordinateX\n at=\"bottom\"\n orient=\"bottom\"\n displayFormat={timeFormat(\"%Y-%m-%d\")}\n />\n <MouseCoordinateY\n at=\"right\"\n orient=\"right\"\n displayFormat={format(\".2f\")}\n />\n <PriceCoordinate\n at=\"left\"\n orient=\"left\"\n price={0}\n displayFormat={format(\".2f\")}\n />\n <XAxis axisAt=\"bottom\" orient=\"bottom\" />\n\n <SingleValueTooltip\n yAccessor={ultosc50.accessor()}\n yLabel={`ULTOSC (${ultosc50.options().windowSize})`}\n yDisplayFormat={format(\".2f\")}\n origin={[-30, 0]}\n />\n\n <YAxis axisAt=\"right\" orient=\"right\" ticks={3} />\n <LineSeries\n yAccessor={ultosc50.accessor()}\n stroke={ultosc50.stroke()}\n />\n </Chart>\n );\n case \"WILLR\":\n return (\n <Chart\n yExtents={[(d) => [d.WILLR]]}\n height={height * 0.25}\n origin={(w, h) => [0, height * 0.65]}\n >\n <MouseCoordinateX\n at=\"bottom\"\n orient=\"bottom\"\n displayFormat={timeFormat(\"%Y-%m-%d\")}\n />\n <MouseCoordinateY\n at=\"right\"\n orient=\"right\"\n displayFormat={format(\".2f\")}\n />\n <PriceCoordinate\n at=\"left\"\n orient=\"left\"\n price={0}\n displayFormat={format(\".2f\")}\n />\n <XAxis axisAt=\"bottom\" orient=\"bottom\" />\n\n <SingleValueTooltip\n yAccessor={willr50.accessor()}\n yLabel={`WILLR (${willr50.options().windowSize})`}\n yDisplayFormat={format(\".2f\")}\n origin={[-30, 0]}\n />\n\n <YAxis axisAt=\"right\" orient=\"right\" ticks={3} />\n <LineSeries\n yAccessor={willr50.accessor()}\n stroke={willr50.stroke()}\n />\n </Chart>\n );\n case \"AD\":\n return (\n <Chart\n yExtents={[(d) => [d.AD]]}\n height={height * 0.25}\n origin={(w, h) => [0, height * 0.65]}\n >\n <MouseCoordinateX\n at=\"bottom\"\n orient=\"bottom\"\n displayFormat={timeFormat(\"%Y-%m-%d\")}\n />\n <MouseCoordinateY\n at=\"right\"\n orient=\"right\"\n displayFormat={format(\".2f\")}\n />\n <PriceCoordinate\n at=\"left\"\n orient=\"left\"\n price={0}\n displayFormat={format(\".2f\")}\n />\n <XAxis axisAt=\"bottom\" orient=\"bottom\" />\n\n <SingleValueTooltip\n yAccessor={ad50.accessor()}\n yLabel={`AD (${ad50.options().windowSize})`}\n yDisplayFormat={format(\".2f\")}\n origin={[-30, 0]}\n />\n\n <YAxis axisAt=\"right\" orient=\"right\" ticks={3} />\n <LineSeries yAccessor={ad50.accessor()} stroke={ad50.stroke()} />\n </Chart>\n );\n case \"ADOSC\":\n return (\n <Chart\n yExtents={[(d) => [d.ADOSC]]}\n height={height * 0.25}\n origin={(w, h) => [0, height * 0.65]}\n >\n <MouseCoordinateX\n at=\"bottom\"\n orient=\"bottom\"\n displayFormat={timeFormat(\"%Y-%m-%d\")}\n />\n <MouseCoordinateY\n at=\"right\"\n orient=\"right\"\n displayFormat={format(\".2f\")}\n />\n <PriceCoordinate\n at=\"left\"\n orient=\"left\"\n price={0}\n displayFormat={format(\".2f\")}\n />\n <XAxis axisAt=\"bottom\" orient=\"bottom\" />\n\n <SingleValueTooltip\n yAccessor={adsoc50.accessor()}\n yLabel={`ADOSC (${adsoc50.options().windowSize})`}\n yDisplayFormat={format(\".2f\")}\n origin={[-30, 0]}\n />\n\n <YAxis axisAt=\"right\" orient=\"right\" ticks={3} />\n <LineSeries\n yAccessor={adsoc50.accessor()}\n stroke={adsoc50.stroke()}\n />\n </Chart>\n );\n case \"OBV\":\n return (\n <Chart\n yExtents={[(d) => [d.OBV]]}\n height={height * 0.25}\n origin={(w, h) => [0, height * 0.65]}\n >\n <MouseCoordinateX\n at=\"bottom\"\n orient=\"bottom\"\n displayFormat={timeFormat(\"%Y-%m-%d\")}\n />\n <MouseCoordinateY\n at=\"right\"\n orient=\"right\"\n displayFormat={format(\".2f\")}\n />\n <PriceCoordinate\n at=\"left\"\n orient=\"left\"\n price={0}\n displayFormat={format(\".2f\")}\n />\n <XAxis axisAt=\"bottom\" orient=\"bottom\" />\n\n <SingleValueTooltip\n yAccessor={obv50.accessor()}\n yLabel={`OBV (${obv50.options().windowSize})`}\n yDisplayFormat={format(\".2f\")}\n origin={[-30, 0]}\n />\n\n <YAxis axisAt=\"right\" orient=\"right\" ticks={3} />\n <LineSeries\n yAccessor={obv50.accessor()}\n stroke={obv50.stroke()}\n />\n </Chart>\n );\n case \"ATR\":\n return (\n <Chart\n yExtents={[(d) => [d.ATR]]}\n height={height * 0.25}\n origin={(w, h) => [0, height * 0.65]}\n >\n <MouseCoordinateX\n at=\"bottom\"\n orient=\"bottom\"\n displayFormat={timeFormat(\"%Y-%m-%d\")}\n />\n <MouseCoordinateY\n at=\"right\"\n orient=\"right\"\n displayFormat={format(\".2f\")}\n />\n <PriceCoordinate\n at=\"left\"\n orient=\"left\"\n price={0}\n displayFormat={format(\".2f\")}\n />\n <XAxis axisAt=\"bottom\" orient=\"bottom\" />\n\n <SingleValueTooltip\n yAccessor={atr50.accessor()}\n yLabel={`ATR (${atr50.options().windowSize})`}\n yDisplayFormat={format(\".2f\")}\n origin={[-30, 0]}\n />\n\n <YAxis axisAt=\"right\" orient=\"right\" ticks={3} />\n <LineSeries\n yAccessor={atr50.accessor()}\n stroke={atr50.stroke()}\n />\n </Chart>\n );\n case \"NATR\":\n return (\n <Chart\n yExtents={[(d) => [d.NATR]]}\n height={height * 0.25}\n origin={(w, h) => [0, height * 0.65]}\n >\n <MouseCoordinateX\n at=\"bottom\"\n orient=\"bottom\"\n displayFormat={timeFormat(\"%Y-%m-%d\")}\n />\n <MouseCoordinateY\n at=\"right\"\n orient=\"right\"\n displayFormat={format(\".2f\")}\n />\n <PriceCoordinate\n at=\"left\"\n orient=\"left\"\n price={0}\n displayFormat={format(\".2f\")}\n />\n <XAxis axisAt=\"bottom\" orient=\"bottom\" />\n\n <SingleValueTooltip\n yAccessor={natr50.accessor()}\n yLabel={`NATR (${natr50.options().windowSize})`}\n yDisplayFormat={format(\".2f\")}\n origin={[-30, 0]}\n />\n\n <YAxis axisAt=\"right\" orient=\"right\" ticks={3} />\n <LineSeries\n yAccessor={natr50.accessor()}\n stroke={natr50.stroke()}\n />\n </Chart>\n );\n case \"TRANGE\":\n return (\n <Chart\n yExtents={[(d) => [d.TRANGE]]}\n height={height * 0.25}\n origin={(w, h) => [0, height * 0.65]}\n >\n <MouseCoordinateX\n at=\"bottom\"\n orient=\"bottom\"\n displayFormat={timeFormat(\"%Y-%m-%d\")}\n />\n <MouseCoordinateY\n at=\"right\"\n orient=\"right\"\n displayFormat={format(\".2f\")}\n />\n <PriceCoordinate\n at=\"left\"\n orient=\"left\"\n price={0}\n displayFormat={format(\".2f\")}\n />\n <XAxis axisAt=\"bottom\" orient=\"bottom\" />\n\n <SingleValueTooltip\n yAccessor={trange50.accessor()}\n yLabel={`TRANGE (${trange50.options().windowSize})`}\n yDisplayFormat={format(\".2f\")}\n origin={[-30, 0]}\n />\n\n <YAxis axisAt=\"right\" orient=\"right\" ticks={3} />\n <LineSeries\n yAccessor={trange50.accessor()}\n stroke={trange50.stroke()}\n />\n </Chart>\n );\n case \"AVGPRICE\":\n return (\n <Chart\n yExtents={[(d) => [d.AVGPRICE]]}\n height={height * 0.25}\n origin={(w, h) => [0, height * 0.65]}\n >\n <MouseCoordinateX\n at=\"bottom\"\n orient=\"bottom\"\n displayFormat={timeFormat(\"%Y-%m-%d\")}\n />\n <MouseCoordinateY\n at=\"right\"\n orient=\"right\"\n displayFormat={format(\".2f\")}\n />\n <PriceCoordinate\n at=\"left\"\n orient=\"left\"\n price={0}\n displayFormat={format(\".2f\")}\n />\n <XAxis axisAt=\"bottom\" orient=\"bottom\" />\n\n <SingleValueTooltip\n yAccessor={avg50.accessor()}\n yLabel={`AVGPRICE (${avg50.options().windowSize})`}\n yDisplayFormat={format(\".2f\")}\n origin={[-30, 0]}\n />\n\n <YAxis axisAt=\"right\" orient=\"right\" ticks={3} />\n <LineSeries\n yAccessor={avg50.accessor()}\n stroke={avg50.stroke()}\n />\n </Chart>\n );\n case \"MEDPRICE\":\n return (\n <Chart\n yExtents={[(d) => [d.MEDPRICE]]}\n height={height * 0.25}\n origin={(w, h) => [0, height * 0.65]}\n >\n <MouseCoordinateX\n at=\"bottom\"\n orient=\"bottom\"\n displayFormat={timeFormat(\"%Y-%m-%d\")}\n />\n <MouseCoordinateY\n at=\"right\"\n orient=\"right\"\n displayFormat={format(\".2f\")}\n />\n <PriceCoordinate\n at=\"left\"\n orient=\"left\"\n price={0}\n displayFormat={format(\".2f\")}\n />\n <XAxis axisAt=\"bottom\" orient=\"bottom\" />\n\n <SingleValueTooltip\n yAccessor={med50.accessor()}\n yLabel={`MEDPRICE (${med50.options().windowSize})`}\n yDisplayFormat={format(\".2f\")}\n origin={[-30, 0]}\n />\n\n <YAxis axisAt=\"right\" orient=\"right\" ticks={3} />\n <LineSeries\n yAccessor={med50.accessor()}\n stroke={med50.stroke()}\n />\n </Chart>\n );\n case \"TYPPRICE\":\n return (\n <Chart\n yExtents={[(d) => [d.TYPPRICE]]}\n height={height * 0.25}\n origin={(w, h) => [0, height * 0.65]}\n >\n <MouseCoordinateX\n at=\"bottom\"\n orient=\"bottom\"\n displayFormat={timeFormat(\"%Y-%m-%d\")}\n />\n <MouseCoordinateY\n at=\"right\"\n orient=\"right\"\n displayFormat={format(\".2f\")}\n />\n <PriceCoordinate\n at=\"left\"\n orient=\"left\"\n price={0}\n displayFormat={format(\".2f\")}\n />\n <XAxis axisAt=\"bottom\" orient=\"bottom\" />\n\n <SingleValueTooltip\n yAccessor={typ50.accessor()}\n yLabel={`TYPPRICE (${typ50.options().windowSize})`}\n yDisplayFormat={format(\".2f\")}\n origin={[-30, 0]}\n />\n\n <YAxis axisAt=\"right\" orient=\"right\" ticks={3} />\n <LineSeries\n yAccessor={typ50.accessor()}\n stroke={typ50.stroke()}\n />\n </Chart>\n );\n case \"WCLPRICE\":\n return (\n <Chart\n yExtents={[(d) => [d.WCLPRICE]]}\n height={height * 0.25}\n origin={(w, h) => [0, height * 0.65]}\n >\n <MouseCoordinateX\n at=\"bottom\"\n orient=\"bottom\"\n displayFormat={timeFormat(\"%Y-%m-%d\")}\n />\n <MouseCoordinateY\n at=\"right\"\n orient=\"right\"\n displayFormat={format(\".2f\")}\n />\n <PriceCoordinate\n at=\"left\"\n orient=\"left\"\n price={0}\n displayFormat={format(\".2f\")}\n />\n <XAxis axisAt=\"bottom\" orient=\"bottom\" />\n\n <SingleValueTooltip\n yAccessor={wcl50.accessor()}\n yLabel={`WCLPRICE (${wcl50.options().windowSize})`}\n yDisplayFormat={format(\".2f\")}\n origin={[-30, 0]}\n />\n\n <YAxis axisAt=\"right\" orient=\"right\" ticks={3} />\n <LineSeries\n yAccessor={wcl50.accessor()}\n stroke={wcl50.stroke()}\n />\n </Chart>\n );\n case \"HT_DCPERIOD\":\n return (\n <Chart\n yExtents={[(d) => [d.HT_DCPERIOD]]}\n height={height * 0.25}\n origin={(w, h) => [0, height * 0.65]}\n >\n <MouseCoordinateX\n at=\"bottom\"\n orient=\"bottom\"\n displayFormat={timeFormat(\"%Y-%m-%d\")}\n />\n <MouseCoordinateY\n at=\"right\"\n orient=\"right\"\n displayFormat={format(\".2f\")}\n />\n <PriceCoordinate\n at=\"left\"\n orient=\"left\"\n price={0}\n displayFormat={format(\".2f\")}\n />\n <XAxis axisAt=\"bottom\" orient=\"bottom\" />\n\n <SingleValueTooltip\n yAccessor={htdcpr50.accessor()}\n yLabel={`HT_DCPERIOD (${htdcpr50.options().windowSize})`}\n yDisplayFormat={format(\".2f\")}\n origin={[-30, 0]}\n />\n\n <YAxis axisAt=\"right\" orient=\"right\" ticks={3} />\n <LineSeries\n yAccessor={htdcpr50.accessor()}\n stroke={htdcpr50.stroke()}\n />\n </Chart>\n );\n case \"HT_DCPHASE\":\n return (\n <Chart\n yExtents={[(d) => [d.HT_DCPHASE]]}\n height={height * 0.25}\n origin={(w, h) => [0, height * 0.65]}\n >\n <MouseCoordinateX\n at=\"bottom\"\n orient=\"bottom\"\n displayFormat={timeFormat(\"%Y-%m-%d\")}\n />\n <MouseCoordinateY\n at=\"right\"\n orient=\"right\"\n displayFormat={format(\".2f\")}\n />\n <PriceCoordinate\n at=\"left\"\n orient=\"left\"\n price={0}\n displayFormat={format(\".2f\")}\n />\n <XAxis axisAt=\"bottom\" orient=\"bottom\" />\n\n <SingleValueTooltip\n yAccessor={htdcph50.accessor()}\n yLabel={`HT_DCPHASE (${htdcph50.options().windowSize})`}\n yDisplayFormat={format(\".2f\")}\n origin={[-30, 0]}\n />\n\n <YAxis axisAt=\"right\" orient=\"right\" ticks={3} />\n <LineSeries\n yAccessor={htdcph50.accessor()}\n stroke={htdcph50.stroke()}\n />\n </Chart>\n );\n case \"HT_PHASOR\":\n return (\n <Chart\n yExtents={[(d) => [d.HT_PHASOR0, d.HT_PHASOR1]]}\n height={height * 0.25}\n origin={(w, h) => [0, height * 0.65]}\n >\n <MouseCoordinateX\n at=\"bottom\"\n orient=\"bottom\"\n displayFormat={timeFormat(\"%Y-%m-%d\")}\n />\n <MouseCoordinateY\n at=\"right\"\n orient=\"right\"\n displayFormat={format(\".2f\")}\n />\n\n <XAxis axisAt=\"bottom\" orient=\"bottom\" />\n <PriceCoordinate\n at=\"left\"\n orient=\"left\"\n price={0}\n displayFormat={format(\".2f\")}\n />\n <YAxis axisAt=\"right\" orient=\"right\" ticks={3} />\n <SingleValueTooltip\n yAccessor={htdcphsr050.accessor()}\n yLabel={`HT_PHASOR0 (${htdcphsr050.options().windowSize})`}\n yDisplayFormat={format(\".2f\")}\n origin={[-30, 0]}\n />\n <SingleValueTooltip\n yAccessor={htdcphsr150.accessor()}\n yLabel={`HT_PHASOR1 (${htdcphsr150.options().windowSize})`}\n yDisplayFormat={format(\".2f\")}\n origin={[120, 0]}\n />\n <LineSeries\n yAccessor={htdcphsr050.accessor()}\n stroke={htdcphsr050.stroke()}\n />\n <LineSeries\n yAccessor={htdcphsr150.accessor()}\n stroke={htdcphsr150.stroke()}\n />\n </Chart>\n );\n case \"HT_SINE\":\n return (\n <Chart\n yExtents={[(d) => [d.HT_SINE0, d.HT_SINE1]]}\n height={height * 0.25}\n origin={(w, h) => [0, height * 0.65]}\n >\n <MouseCoordinateX\n at=\"bottom\"\n orient=\"bottom\"\n displayFormat={timeFormat(\"%Y-%m-%d\")}\n />\n <MouseCoordinateY\n at=\"right\"\n orient=\"right\"\n displayFormat={format(\".2f\")}\n />\n\n <XAxis axisAt=\"bottom\" orient=\"bottom\" />\n <PriceCoordinate\n at=\"left\"\n orient=\"left\"\n price={0}\n displayFormat={format(\".2f\")}\n />\n <YAxis axisAt=\"right\" orient=\"right\" ticks={3} />\n <SingleValueTooltip\n yAccessor={htsine050.accessor()}\n yLabel={`HT_SINE0 (${htsine050.options().windowSize})`}\n yDisplayFormat={format(\".2f\")}\n origin={[-30, 0]}\n />\n <SingleValueTooltip\n yAccessor={htsine150.accessor()}\n yLabel={`HT_SINE1 (${htsine150.options().windowSize})`}\n yDisplayFormat={format(\".2f\")}\n origin={[80, 0]}\n />\n <LineSeries\n yAccessor={htsine050.accessor()}\n stroke={htsine050.stroke()}\n />\n <LineSeries\n yAccessor={htsine150.accessor()}\n stroke={htsine150.stroke()}\n />\n </Chart>\n );\n case \"HT_TRENDMODE\":\n return (\n <Chart\n yExtents={[(d) => [d.HT_TRENDMODE]]}\n height={height * 0.25}\n origin={(w, h) => [0, height * 0.65]}\n >\n <MouseCoordinateX\n at=\"bottom\"\n orient=\"bottom\"\n displayFormat={timeFormat(\"%Y-%m-%d\")}\n />\n <MouseCoordinateY\n at=\"right\"\n orient=\"right\"\n displayFormat={format(\".2f\")}\n />\n <PriceCoordinate\n at=\"left\"\n orient=\"left\"\n price={0}\n displayFormat={format(\".2f\")}\n />\n <XAxis axisAt=\"bottom\" orient=\"bottom\" />\n\n <SingleValueTooltip\n yAccessor={httrend50.accessor()}\n yLabel={`HT_TRENDMODE (${httrend50.options().windowSize})`}\n yDisplayFormat={format(\".2f\")}\n origin={[-30, 0]}\n />\n\n <YAxis axisAt=\"right\" orient=\"right\" ticks={3} />\n <LineSeries\n yAccessor={httrend50.accessor()}\n stroke={httrend50.stroke()}\n />\n </Chart>\n );\n default:\n flag = true;\n }\n };\n return (\n <>\n {flag ? (\n <ChartCanvas\n height={height}\n ratio={ratio}\n width={window.innerWidth - 260}\n margin={margin}\n data={data}\n type={type}\n xAccessor={(d) => d.date}\n xScale={scaleTime()}\n xExtents={xExtents}\n >\n <Chart\n id={0}\n yExtents={[(d) => d.close]}\n padding={{ top: 45, bottom: 0 }}\n >\n <defs>\n <linearGradient id=\"MyGradient\" x1=\"0\" y1=\"100%\" x2=\"0\" y2=\"0%\">\n <stop offset=\"0%\" stopColor=\"#b5d0ff\" stopOpacity={0.2} />\n <stop offset=\"70%\" stopColor=\"#6fa4fc\" stopOpacity={0.4} />\n <stop offset=\"100%\" stopColor=\"#4286f4\" stopOpacity={0.8} />\n </linearGradient>\n </defs>\n <XAxis axisAt=\"bottom\" orient=\"bottom\" ticks={5} />\n <YAxis axisAt=\"right\" orient=\"right\" ticks={5} />\n <AreaSeries\n yAccessor={(d) => d.close}\n fill=\"url(#MyGradient)\"\n strokeWidth={2}\n interpolation={curveMonotoneX}\n canvasGradient={canvasGradient}\n />\n\n <Indicators data={data} indicators={indicators} />\n <MouseCoordinateX\n at=\"bottom\"\n orient=\"bottom\"\n displayFormat={timeFormat(\"%Y-%m-%d\")}\n />\n <MouseCoordinateY\n at=\"right\"\n orient=\"right\"\n displayFormat={format(\".2f\")}\n />\n <OHLCTooltip origin={[-30, -15]} />\n </Chart>\n <Chart\n id={2}\n yExtents={(d) => d.volume}\n height={150}\n origin={(w, h) => [0, h - 150]}\n >\n <YAxis\n axisAt=\"left\"\n orient=\"left\"\n ticks={5}\n tickFormat={format(\".2s\")}\n />\n\n <MouseCoordinateX\n at=\"bottom\"\n orient=\"bottom\"\n displayFormat={timeFormat(\"%Y-%m-%d\")}\n />\n <MouseCoordinateY\n at=\"left\"\n orient=\"left\"\n displayFormat={format(\".4s\")}\n />\n\n <BarSeries\n yAccessor={(d) => d.volume}\n fill={(d) => (d.close > d.open ? \"#a0c5af\" : \"#ff6666\")}\n />\n </Chart>\n <CrossHairCursor />\n </ChartCanvas>\n ) : (\n <ChartCanvas\n height={height}\n ratio={ratio}\n width={window.innerWidth - 260}\n margin={margin}\n data={data}\n type={type}\n xAccessor={(d) => d.date}\n xScale={scaleTime()}\n xExtents={xExtents}\n >\n <Chart\n id={1}\n height={height * 0.6}\n yExtents={[(d) => d.close]}\n padding={{ top: 45, bottom: 0 }}\n >\n <defs>\n <linearGradient id=\"MyGradient\" x1=\"0\" y1=\"100%\" x2=\"0\" y2=\"0%\">\n <stop offset=\"0%\" stopColor=\"#b5d0ff\" stopOpacity={0.2} />\n <stop offset=\"70%\" stopColor=\"#6fa4fc\" stopOpacity={0.4} />\n <stop offset=\"100%\" stopColor=\"#4286f4\" stopOpacity={0.8} />\n </linearGradient>\n </defs>\n <XAxis\n axisAt=\"bottom\"\n orient=\"bottom\"\n showTicks={false}\n outerTickSize={0}\n />\n <YAxis axisAt=\"right\" orient=\"right\" ticks={5} />\n <AreaSeries\n yAccessor={(d) => d.close}\n fill=\"url(#MyGradient)\"\n strokeWidth={2}\n interpolation={curveMonotoneX}\n canvasGradient={canvasGradient}\n />\n\n <Indicators data={data} indicators={indicators} />\n\n <MouseCoordinateY\n at=\"right\"\n orient=\"right\"\n displayFormat={format(\".2f\")}\n />\n <EdgeIndicator\n itemType=\"last\"\n orient=\"right\"\n edgeAt=\"right\"\n yAccessor={(d) => d.close}\n fill={(d) => (d.close > d.open ? \"#6BA583\" : \"#FF0000\")}\n />\n <OHLCTooltip origin={[-30, -15]} />\n </Chart>\n <Chart\n id={2}\n yExtents={(d) => d.volume}\n height={height * 0.2}\n origin={(w, h) => [0, height * 0.4]}\n >\n <YAxis\n axisAt=\"left\"\n orient=\"left\"\n ticks={5}\n tickFormat={format(\".2s\")}\n />\n\n <MouseCoordinateY\n at=\"left\"\n orient=\"left\"\n displayFormat={format(\".4s\")}\n />\n\n <BarSeries\n yAccessor={(d) => d.volume}\n fill={(d) => (d.close > d.open ? \"#a0c5af\" : \"#ff6666\")}\n />\n </Chart>\n {priceChart()}\n <CrossHairCursor />\n </ChartCanvas>\n )}\n </>\n );\n }\n}\n\nAreaChart.propTypes = {\n data: PropTypes.array.isRequired,\n width: PropTypes.number.isRequired,\n ratio: PropTypes.number.isRequired,\n type: PropTypes.oneOf([\"svg\", \"hybrid\"]).isRequired,\n};\n\nAreaChart.defaultProps = {\n type: \"svg\",\n};\nAreaChart = fitWidth(AreaChart);\n\nexport default AreaChart;\n"
},
{
"alpha_fraction": 0.5800970792770386,
"alphanum_fraction": 0.6086165308952332,
"avg_line_length": 24.353845596313477,
"blob_id": "626b1b24605282279e04f693a5bcdcac6b993210",
"content_id": "ac6f31383ae3a7acd97bd93714bf3afbf8dff73f",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "JavaScript",
"length_bytes": 1648,
"license_type": "no_license",
"max_line_length": 81,
"num_lines": 65,
"path": "/Frontend/analysis/src/components/Dashboard/WatchList.js",
"repo_name": "phoenixx1/Market-Analysis",
"src_encoding": "UTF-8",
"text": "import React from \"react\";\nimport styled from \"styled-components\";\nimport { useCollection } from \"react-firebase-hooks/firestore\";\nimport { db } from \"../../Firebase\";\nimport { connect } from \"react-redux\";\nimport { setName } from \"../../actions\";\n// import ClearIcon from \"@material-ui/icons/Clear\";\n\nfunction WatchList({ currentCompanyName, setName }) {\n const [companies] = useCollection(\n db\n .collection(\"watchlist\")\n .doc(\"list\")\n .collection(\"companies\")\n .orderBy(\"timestamp\", \"asc\")\n );\n // console.log(companies.docs());\n\n const updateCompany = (event, company) => {\n // event.preventDefault();\n setName(company);\n };\n\n return (\n <WatchListContainer>\n {companies?.docs.map((allCompany) => {\n const { company } = allCompany.data();\n return (\n <CompanyContainer onClick={(event) => updateCompany(event, company)}>\n {company}\n {/* <span>\n <ClearIcon />\n </span> */}\n </CompanyContainer>\n );\n })}\n </WatchListContainer>\n );\n}\nconst mapStateToProps = (state) => ({\n currentCompanyName: state.currentCompany,\n});\nexport default connect(mapStateToProps, { setName })(WatchList);\n\nconst WatchListContainer = styled.div`\n margin-top: 10px;\n width: 100%;\n`;\n\nconst CompanyContainer = styled.div`\n display: flex;\n align-items: center;\n\n padding: 5px;\n color: #fff;\n background: rgb(94, 108, 130);\n margin-top: 10px;\n cursor: pointer;\n\n :hover {\n background: rgba(86, 92, 97, 0.6);\n transform: scale(1.01);\n box-shadow: 0 4px 8px 0 rgba(0, 0, 0, 0.7), 0 6px 20px 0 rgba(0, 0, 0, 0.19);\n }\n`;\n"
},
{
"alpha_fraction": 0.5106287002563477,
"alphanum_fraction": 0.5195236206054688,
"avg_line_length": 21.793813705444336,
"blob_id": "f097672c674d620450be179b6d151ee8aafe2b08",
"content_id": "34943d8645c4b19c31874e82dcfdc369c5f44713",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "JavaScript",
"length_bytes": 6633,
"license_type": "no_license",
"max_line_length": 77,
"num_lines": 291,
"path": "/Frontend/analysis/src/components/Dashboard/Charts/RenderChart.js",
"repo_name": "phoenixx1/Market-Analysis",
"src_encoding": "UTF-8",
"text": "import React, { useEffect } from \"react\";\nimport { connect } from \"react-redux\";\nimport styled from \"styled-components\";\nimport {\n fetchPrices,\n setChartType,\n loadStudies,\n latestStudy,\n setName,\n} from \"../../../actions\";\nimport { TypeChooser, SaveChartAsImage } from \"react-stockcharts/lib/helper\";\nimport CandleStick from \"./CandleStick\";\nimport AreaChart from \"./AreaChart\";\nimport LineChart from \"./LineChart\";\nimport OHLCChart from \"./OHLCChart\";\nimport KagiChart from \"./KagiChart\";\nimport RenkoChart from \"./RenkoChart\";\nimport PointFigureChart from \"./PointFigureChart\";\n\nimport { Spinner } from \"react-bootstrap\";\n\nfunction RenderChart({\n prices,\n studies,\n currentType,\n selectedStudy,\n currentCompany,\n fetchPrices,\n}) {\n useEffect(() => {\n fetchPrices(currentCompany);\n }, [currentCompany]);\n let data = [];\n\n prices.map((c) => {\n var dt = c.Date.toString();\n let tempDate = new Date(\n dt.substring(0, 4),\n dt.substring(4, 6) - 1,\n dt.substring(6, 8)\n );\n var bb = { top: c.BBANDS0, middle: c.BBANDS1, bottom: c.BBANDS2 };\n\n var macd = { macd: c.MACD0, signal: c.MACD1, divergence: c.MACD2 };\n\n var macdext = {\n macd: c.MACDEXT0,\n signal: c.MACDEXT1,\n divergence: c.MACDEXT2,\n };\n var macdfix = {\n macd: c.MACDFIX0,\n signal: c.MACDFIX1,\n divergence: c.MACDFIX2,\n };\n var stoch = { K: c.STOCH0, D: c.STOCH1 };\n\n var stochf = { K: c.STOCHF0, D: c.STOCHF1 };\n\n var stochrsi = { K: c.STOCHRSI0, D: c.STOCHRSI1 };\n\n data.push({\n date: tempDate,\n open: +c.Open,\n high: +c.High,\n low: +c.Low,\n close: +c.Close,\n volume: c.Volume,\n\n // Overlap Studies\n BB: bb, //\n DEMA: +c.DEMA, //\n EMA: +c.EMA, //\n HT_TRENDLINE: +c.HT_TRENDLINE, //\n KAMA: +c.KAMA, //\n MAMA: +c.MAMA0, //\n FAMA: +c.MAMA1, //\n MIDPOINT: +c.MIDPOINT, //\n MIDPRICE: +c.MIDPRICE, //\n SAR: +c.SAR, //\n SAREXT: +c.SAREXT, //\n SMA: +c.SMA, //\n T3: +c.T3, //\n TEMA: +c.TEMA, //\n TRIMA: +c.TRIMA, //\n WMA: +c.WMA, //\n\n // MomentumIndicators\n ADX: c.ADX, //\n ADXR: c.ADXR,\n APO: c.APO,\n AROON0: c.AROON0,\n AROON1: c.AROON1,\n AROONOSC: c.AROONOSC,\n BOP: c.BOP,\n CCI: c.CCI,\n CMO: c.CMO,\n DX: c.DX,\n MACD: macd,\n MACDEXT: macdext,\n MACDFIX: macdfix,\n MFI: c.MFI,\n MINUS_DI: c.MINUS_DI,\n MINUS_DM: c.MINUS_DM,\n MOM: c.MOM,\n PLUS_DI: c.PLUS_DI,\n PLUS_DM: c.PLUS_DM,\n PPO: c.PPO,\n ROC: c.ROC,\n ROCP: c.ROCP,\n ROCR: c.ROCR,\n ROCR100: c.ROCR100,\n RSI: c.RSI,\n STOCH: stoch,\n STOCHF: stochf,\n STOCHRSI: stochrsi,\n TRIX: c.TRIX,\n ULTOSC: c.ULTOSC,\n WILLR: c.WILLR,\n\n // Volume Incdicators\n AD: c.AD,\n ADOSC: c.ADOSC,\n OBV: c.OBV,\n\n // Volatility Indicators\n ATR: c.ATR,\n NATR: c.NATR,\n TRANGE: c.TRANGE,\n\n // Price Transform\n AVGPRICE: c.AVGPRICE,\n MEDPRICE: c.MEDPRICE,\n TYPPRICE: c.TYPPRICE,\n WCLPRICE: c.WCLPRICE,\n\n // Cycle Indicators\n HT_DCPERIOD: c.HT_DCPERIOD,\n HT_DCPHASE: c.HT_DCPHASE,\n HT_PHASOR0: c.HT_PHASOR0,\n HT_PHASOR1: c.HT_PHASOR1,\n HT_SINE0: c.HT_SINE0,\n HT_SINE1: c.HT_SINE1,\n HT_TRENDMODE: c.HT_TRENDMODE,\n });\n });\n\n // console.log(data);\n if (data.length === 0) {\n return (\n <div\n style={{\n alignItems: \"center\",\n justifyContent: \"center\",\n width: \"100vw\",\n display: \"flex\",\n height: window.innerHeight - 100,\n }}\n >\n <Spinner animation=\"border\" role=\"status\" />\n </div>\n );\n }\n let indicators = {};\n studies.map((study) => {\n indicators[study] = !indicators[study];\n });\n switch (currentType) {\n case \"CandleStick\":\n return (\n <ChartContainer>\n {/* <TypeChooser>\n {(type) => (\n <CandleStick\n ref={this.saveNode}\n type={type}\n data={data}\n indicators={indicators}\n />\n )}\n </TypeChooser> */}\n\n <CandleStick\n type={\"canvas + svg\"}\n data={data}\n indicators={indicators}\n selectedStudy={selectedStudy}\n />\n </ChartContainer>\n );\n break;\n case \"Area\":\n return (\n <ChartContainer>\n <AreaChart\n type={\"canvas + svg\"}\n data={data}\n indicators={indicators}\n selectedStudy={selectedStudy}\n />\n </ChartContainer>\n );\n break;\n case \"Line\":\n return (\n <ChartContainer>\n <LineChart\n type={\"canvas + svg\"}\n data={data}\n indicators={indicators}\n selectedStudy={selectedStudy}\n />\n </ChartContainer>\n );\n break;\n case \"OHLC\":\n return (\n <ChartContainer>\n <OHLCChart\n type={\"canvas + svg\"}\n data={data}\n indicators={indicators}\n selectedStudy={selectedStudy}\n />\n </ChartContainer>\n );\n break;\n case \"Kagi\":\n return (\n <ChartContainer>\n <KagiChart\n type={\"canvas + svg\"}\n data={data}\n indicators={indicators}\n selectedStudy={selectedStudy}\n />\n </ChartContainer>\n );\n break;\n case \"Renko\":\n return (\n <ChartContainer>\n <RenkoChart\n type={\"canvas + svg\"}\n data={data}\n indicators={indicators}\n selectedStudy={selectedStudy}\n />\n </ChartContainer>\n );\n break;\n case \"Point & Figure\":\n return (\n <ChartContainer>\n <PointFigureChart\n type={\"canvas + svg\"}\n data={data}\n indicators={indicators}\n selectedStudy={selectedStudy}\n />\n </ChartContainer>\n );\n break;\n default:\n break;\n }\n}\nconst mapStateToProps = (state) => {\n return {\n prices: state.prices,\n currentType: state.currentChartType,\n studies: state.loadStudies,\n selectedStudy: state.selectedStudy,\n currentCompany: state.currentCompany,\n };\n};\n\nexport default connect(mapStateToProps, {\n fetchPrices,\n setChartType,\n loadStudies,\n latestStudy,\n setName,\n})(RenderChart);\n\nconst ChartContainer = styled.div`\n justify-content: center;\n flex: 0.7;\n /* height: 560px; */\n /* overflow-y: scroll; */\n`;\n"
},
{
"alpha_fraction": 0.6645161509513855,
"alphanum_fraction": 0.6741935610771179,
"avg_line_length": 25.600000381469727,
"blob_id": "075fc0adc835d48e66959ae3af0fa3e54f73be4b",
"content_id": "1311292c48440fa048322a1683895de04c44feb6",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 930,
"license_type": "no_license",
"max_line_length": 82,
"num_lines": 35,
"path": "/API/models/prophet.py",
"repo_name": "phoenixx1/Market-Analysis",
"src_encoding": "UTF-8",
"text": "import numpy as np\nimport pandas as pd\nfrom matplotlib import pyplot as plt\nfrom sklearn.preprocessing import MinMaxScaler\n\ndf = pd.read_csv('../data/3MINDIA.csv')\ndf['Date'] = df['Date'].apply(str)\ndf['Date'] = pd.to_datetime(df.Date,format='%Y-%m-%d')\ndf.set_index(\"Date\", drop=False, inplace=True)\n\ndates = df['Date']\ndf.drop(['Date'], axis=1, inplace=True)\nscaler = MinMaxScaler(feature_range=(0, 1))\ndata = scaler.fit_transform(df)\ndata = pd.DataFrame(data, index=df.index, columns=df.columns)\ndata['Date'] = dates\n\nX = data\ncut = int(len(X)*0.8)\n\nX_train = X[:cut]\nX_test = X[cut:]\nX_train[['Date', 'Close']]\n\nfrom fbprophet import Prophet\n\nmodel = Prophet()\nmodel.fit(X_train[[\"Date\", \"Close\"]].rename(columns={\"Date\": \"ds\", \"Close\": \"y\"}))\n\nforecast = model.predict(X_test[[\"Date\", \"Close\"]].rename(columns={\"Date\": \"ds\"}))\n\nplt.figure(figsize=(20, 5))\nplt.plot(X['Close'])\nplt.plot(X_test.index, forecast.yhat)\nplt.show()"
},
{
"alpha_fraction": 0.5835901498794556,
"alphanum_fraction": 0.599768877029419,
"avg_line_length": 23.961538314819336,
"blob_id": "1c5fd5abd2c0f74d7259948f6c9feb5f4fd3722f",
"content_id": "6d769413bf48046a659b37384b6802cda3353eda",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "JavaScript",
"length_bytes": 2596,
"license_type": "no_license",
"max_line_length": 62,
"num_lines": 104,
"path": "/Frontend/analysis/src/components/Dashboard/Sidebar.js",
"repo_name": "phoenixx1/Market-Analysis",
"src_encoding": "UTF-8",
"text": "import React, { useState } from \"react\";\nimport styled from \"styled-components\";\nimport { Link } from \"react-router-dom\";\nimport * as FaIcons from \"react-icons/fa\";\nimport * as AiIcons from \"react-icons/ai\";\nimport { IconContext } from \"react-icons/lib\";\nimport { connect } from \"react-redux\";\nimport AnalysisSidebar from \"./AnalysisSidebar\";\nimport CustomizationNavChart from \"./CustomizationNavChart\";\nimport CustomizationNavStudy from \"./CustomizationNavStudy\";\nimport AnalysisPopup from \"./prediction/AnalysisPopup\";\n\nfunction Sidebar({ currentCompanyName }) {\n const [sidebar, setSidebar] = useState(false);\n\n const showSidebar = () => setSidebar(!sidebar);\n\n return (\n <>\n <IconContext.Provider value={{ color: \"#fff\" }}>\n {/* global value for all icons */}\n <Nav>\n {/* <NavIcon to=\"#\">\n <FaIcons.FaBars onClick={showSidebar} />\n </NavIcon> */}\n <CompanyName>{currentCompanyName}</CompanyName>\n <CustomizationNavChart\n title={\"Display\"}\n items={[\n \"CandleStick\",\n \"Area\",\n \"Line\",\n \"OHLC\",\n \"Kagi\",\n \"Renko\",\n \"Point & Figure\",\n ]}\n />\n <CustomizationNavStudy title={\"Study\"} />\n <AnalysisPopup />\n </Nav>\n\n <SidebarNav sidebar={sidebar}>\n <SidebarWrap>\n <NavIcon to=\"#\">\n <AiIcons.AiOutlineClose onClick={showSidebar} />\n </NavIcon>\n <AnalysisSidebar />\n </SidebarWrap>\n </SidebarNav>\n </IconContext.Provider>\n </>\n );\n}\nconst mapStateToProps = (state) => ({\n currentCompanyName: state.currentCompany,\n});\n\nexport default connect(mapStateToProps)(Sidebar);\n\nconst CompanyName = styled.h4`\n margin-left: 250px;\n margin-top: 0.5rem;\n font-size: 1.5rem;\n height: 40px;\n display: flex;\n justify-content: flex-start;\n align-items: center;\n`;\n\nconst Nav = styled.div`\n background: #a5aab5;\n height: 40px;\n display: flex;\n justify-content: flex-start;\n align-items: center;\n`;\n\nconst NavIcon = styled(Link)`\n margin-left: 1rem;\n font-size: 1.5rem;\n height: 40px;\n display: flex;\n justify-content: flex-start;\n align-items: center;\n`;\n\nconst SidebarNav = styled.nav`\n background: #a5aab5;\n width: 250px;\n display: flex;\n justify-content: center;\n margin-top: 60px;\n height: 100vh;\n position: fixed;\n top: 0;\n left: ${({ sidebar }) => (sidebar ? \"0\" : \"-100%\")};\n transition: 350ms;\n z-index: 10;\n`;\n\nconst SidebarWrap = styled.div`\n width: 100%;\n`;\n"
},
{
"alpha_fraction": 0.7427884340286255,
"alphanum_fraction": 0.7427884340286255,
"avg_line_length": 22.16666603088379,
"blob_id": "d92006b2cff283e53c9418685434107cf972aa11",
"content_id": "7159844539304c2575c6066eadc7da763f082451",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 416,
"license_type": "no_license",
"max_line_length": 52,
"num_lines": 18,
"path": "/API/app.py",
"repo_name": "phoenixx1/Market-Analysis",
"src_encoding": "UTF-8",
"text": "from flask import Flask\nfrom flask_restful import Api\nfrom data_loading import CompanyData\nfrom flask_cors import CORS, cross_origin\nfrom models import ARIMA\n\napp = Flask(__name__)\napp.secret_key = 'college-project'\n\ncors = CORS(app)\napp.config['CORS_HEADERS'] = 'Content-Type'\n\napi = Api(app)\n\napi.add_resource(CompanyData, '/data/<string:name>')\napi.add_resource(ARIMA, '/ARIMA/<string:name>')\n\napp.run(debug=True)"
},
{
"alpha_fraction": 0.6485355496406555,
"alphanum_fraction": 0.6841003894805908,
"avg_line_length": 18.91666603088379,
"blob_id": "fd5ec676b2d3cd64cabe9bebc253070a8b9b9ed3",
"content_id": "cf0348253e25d5e11e118f871901290e7c7dc053",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "JavaScript",
"length_bytes": 478,
"license_type": "no_license",
"max_line_length": 55,
"num_lines": 24,
"path": "/Frontend/analysis/src/components/Dashboard/AnalysisSidebar.js",
"repo_name": "phoenixx1/Market-Analysis",
"src_encoding": "UTF-8",
"text": "import React from \"react\";\nimport styled from \"styled-components\";\n\nfunction AnalysisSidebar() {\n return <SidebarContainer>Analysis</SidebarContainer>;\n}\n\nexport default AnalysisSidebar;\n\nconst SidebarContainer = styled.div`\n background-color: #cfd5e3;\n color: black;\n flex: 0.3;\n max-width: 260px;\n /* align-items: center; */\n justify-content: center;\n display: flex;\n\n /* > hr {\n margin-top: 10px;\n margin-bottom: 10px;\n border: 1px solid #49274b;\n } */\n`;\n"
},
{
"alpha_fraction": 0.8709677457809448,
"alphanum_fraction": 0.8709677457809448,
"avg_line_length": 31,
"blob_id": "116cc30d2326c5d01d25a96553bdfb20836e3abb",
"content_id": "3fbf283fcadb2cf8ca60c4746cb7fe4bff846403",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 31,
"license_type": "no_license",
"max_line_length": 31,
"num_lines": 1,
"path": "/API/technicals/__init__.py",
"repo_name": "phoenixx1/Market-Analysis",
"src_encoding": "UTF-8",
"text": "from .indicators import Studies"
},
{
"alpha_fraction": 0.5851721167564392,
"alphanum_fraction": 0.5878199338912964,
"avg_line_length": 25.9761905670166,
"blob_id": "ce2fe97c1a1fe452cb2a7738ca6d6d3da650288b",
"content_id": "2017cc5ef307d95a9359884d85d767232805cdcf",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "JavaScript",
"length_bytes": 1133,
"license_type": "no_license",
"max_line_length": 73,
"num_lines": 42,
"path": "/Frontend/analysis/src/components/Dashboard/StudyDropdown.js",
"repo_name": "phoenixx1/Market-Analysis",
"src_encoding": "UTF-8",
"text": "import React from \"react\";\nimport { Dropdown, DropdownButton } from \"react-bootstrap\";\nimport { loadStudies, latestStudy } from \"../../actions\";\nimport { connect } from \"react-redux\";\nimport { Studies } from \"./studiesList\";\n\nfunction StudyDropdown({ list, name, study, loadStudies, latestStudy }) {\n const updateType = (event, item) => {\n event.preventDefault();\n loadStudies(item);\n if (Studies.includes(item)) latestStudy(item);\n };\n return (\n <DropdownButton\n id=\"dropdown-variants-secondary\"\n variant=\"light\"\n title={name}\n drop=\"right\"\n >\n <div style={{ maxHeight: \"400px\", overflowY: \"auto\" }}>\n {list.map((item) => {\n return (\n <Dropdown.Item\n href=\"#\"\n onClick={(event) => updateType(event, item)}\n >\n {item}\n </Dropdown.Item>\n );\n })}\n </div>\n </DropdownButton>\n );\n}\nconst mapStateToProps = (state) => ({\n study: state.loadStudies,\n selectedStudy: state.selectedStudy,\n});\n\nexport default connect(mapStateToProps, { loadStudies, latestStudy })(\n StudyDropdown\n);\n"
},
{
"alpha_fraction": 0.5671695470809937,
"alphanum_fraction": 0.576508641242981,
"avg_line_length": 24.77777862548828,
"blob_id": "104a4802db8e83d0e98e4f13626ffb12c4b48340",
"content_id": "bac29fac0244fb4f5a7c314fb0b3f446efd89e1c",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "JavaScript",
"length_bytes": 2784,
"license_type": "no_license",
"max_line_length": 74,
"num_lines": 108,
"path": "/Frontend/analysis/src/components/Dashboard/Search.js",
"repo_name": "phoenixx1/Market-Analysis",
"src_encoding": "UTF-8",
"text": "import React from \"react\";\nimport styled from \"styled-components\";\nimport TextField from \"@material-ui/core/TextField\";\nimport Autocomplete from \"@material-ui/lab/Autocomplete\";\nimport { connect } from \"react-redux\";\nimport { fetchNames } from \"../../actions\";\nimport { db, auth } from \"../../Firebase\";\nimport firebase from \"firebase\";\nimport { useCollection } from \"react-firebase-hooks/firestore\";\n\nclass Search extends React.Component {\n constructor(props) {\n super(props);\n this.state = {\n input: \"\",\n companyList: [],\n // authUser: null,\n };\n }\n\n componentDidMount() {\n this.props.fetchNames();\n // this.listener = this.firebase.auth.useAuthState((authUser) => {\n // authUser\n // ? this.setState({ authUser })\n // : this.setState({ authUser: null });\n // });\n }\n\n addToList(event, value) {\n event.preventDefault();\n\n if (value.length != 0) {\n // const companies = [];\n // db.collection(\"watchlist\")\n // .doc(\"list\")\n // .collection(\"companies\")\n // .get()\n // .then((comp) => {\n // comp.docs.forEach((doc) => {\n // companies.push(String(doc.data().company));\n // });\n // });\n db.collection(\"watchlist\").doc(\"list\").collection(\"companies\").add({\n company: value,\n timestamp: firebase.firestore.FieldValue.serverTimestamp(),\n });\n }\n }\n\n render() {\n let companyList = [];\n this.props.names.map((name) => {\n return name.map((n) => {\n companyList.push(n.SYMBOL); // + \" : \" + n.SYMBOL);\n });\n });\n\n return (\n <SearchContainer>\n <AutocompleteContainer\n onChange={(event, value) => this.addToList(event, value)}\n freeSolo\n clearOnBlur\n disableClearable\n options={companyList}\n renderInput={(params) => (\n <AutoTextField\n {...params}\n id=\"standard-secondary\"\n label=\"Search Company Name\"\n InputProps={{ ...params.InputProps, type: \"search\" }}\n />\n )}\n />\n </SearchContainer>\n );\n }\n}\n\nconst mapStateToProps = (state) => {\n return { names: state.company };\n};\n\nexport default connect(mapStateToProps, { fetchNames })(Search);\n\nconst AutocompleteContainer = styled(Autocomplete)`\n width: 230px;\n margin-top: -30px;\n height: 5vh;\n`;\n\nconst AutoTextField = styled(TextField)`\n /* background-color: lightgray; */\n height: 5vh;\n`;\n\nconst SearchContainer = styled.div`\n /* Use rgba value for not applying opacity property to child elements */\n background: rgba(94, 108, 130, 0.5);\n margin-top: 15px;\n width: 100%;\n height: 5vh;\n display: flex;\n border-radius: 50px;\n align-items: center;\n justify-content: center;\n`;\n"
}
] | 32 |
allwin-baby/Gesture-Controlled-Pc
|
https://github.com/allwin-baby/Gesture-Controlled-Pc
|
17197d07d8050fcb1fa098cd0986238094836531
|
4364b0f9887cf5cf89d1b464d9b85a57e830a264
|
ba4dba798b47a987bdf7158562e986e1df417bbe
|
refs/heads/main
| 2023-02-13T17:30:49.969156 | 2021-02-19T15:24:55 | 2021-02-19T15:24:55 | 329,060,923 | 0 | 0 | null | null | null | null | null |
[
{
"alpha_fraction": 0.7705627679824829,
"alphanum_fraction": 0.7835497856140137,
"avg_line_length": 56.75,
"blob_id": "7076d46e818c3a73509aaf6f725277010fc93bd7",
"content_id": "8dc7eef6aee6a2881acadc202c26f659155157ba",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Markdown",
"length_bytes": 231,
"license_type": "no_license",
"max_line_length": 129,
"num_lines": 4,
"path": "/README.md",
"repo_name": "allwin-baby/Gesture-Controlled-Pc",
"src_encoding": "UTF-8",
"text": "# COMPUTER-VISION\nControl PC by predefined gestures using Computer Vision and Python GUI automation\n\n[](https://www.youtube.com/watch?v=uKGNDZPB9Jg)\n"
},
{
"alpha_fraction": 0.49204424023628235,
"alphanum_fraction": 0.5402333736419678,
"avg_line_length": 31.352941513061523,
"blob_id": "fa964b24394a9a5071bd58a71f76ed262670df54",
"content_id": "a72cb65e4d7d9f3bc7d86008170b2f653d6e837c",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 6599,
"license_type": "no_license",
"max_line_length": 118,
"num_lines": 204,
"path": "/SwipeMusicPlayer/swipeMusicPlayer.py",
"repo_name": "allwin-baby/Gesture-Controlled-Pc",
"src_encoding": "UTF-8",
"text": "from imutils.video import VideoStream\nimport numpy as np\nimport argparse\nimport cv2\nimport imutils\nimport time\n\nfrom directkeys import W, A, S, D ,P\nfrom directkeys import PressKey, ReleaseKey \n\ndef nothing(x):\n pass\ncv2.namedWindow('test')\ncv2.resizeWindow('test',(560,560))\ncv2.createTrackbar('HUE LOW','test',0,179,nothing) # max value and initial pos \ncv2.createTrackbar('HUE HIGH','test',179,179,nothing)\ncv2.createTrackbar('SATURATION LOW','test',0,255,nothing)\ncv2.createTrackbar('SATURATION HIGH','test',255,255,nothing)\ncv2.createTrackbar('VALUE LOW','test',0,255,nothing)\ncv2.createTrackbar('VALUE HIGH','test',255,255,nothing) \ncv2.createTrackbar('KernalOpenSize','test',5,50,nothing) \ncv2.createTrackbar('KernalCloseSize','test',5,50,nothing)\n\nprev = None\nnow = None\nvs = VideoStream(src=0).start()\nface_cascade = cv2.CascadeClassifier('haarcascade_frontalface_default.xml')\n\ntime.sleep(2.0)\ninitial = True\nflag = False\ncurrent_key_pressed = set()\ncircle_radius = 30\nwindowSize = 160\nlr_counter = 0\n\n\nwhile True:\n keyPressed = False\n keyPressed_lr = False\n # grab the current frame\n frame = vs.read()\n frame= cv2.flip(frame,1)\n show = frame.copy()\n half = cv2.resize(show, (0, 0), fx = 0.5, fy = 0.5) \n cv2.imshow('testtt',half)\n height,width = frame.shape[:2]\n gray = cv2.cvtColor(frame, cv2.COLOR_BGR2GRAY)\n faces = face_cascade.detectMultiScale(\n gray, \n scaleFactor=1.2,\n minNeighbors=5, \n minSize=(20, 20)\n )\n\n\n for (x, y, w, h) in faces:\n cv2.rectangle(frame, (x-50, y-50), (x+w+50, y+h+50), (0, 2),-1)\n\n\n\n frame = imutils.resize(frame, width=600)\n blurred = cv2.GaussianBlur(frame, (11, 11), 0)\n hsv = cv2.cvtColor(blurred, cv2.COLOR_BGR2HSV)\n\n kernalOpenSize= cv2.getTrackbarPos('KernalOpenSize','test')\n kernalCloseSize= cv2.getTrackbarPos('KernalCloseSize','test')\n kernalOpen = np.ones((kernalOpenSize,kernalOpenSize))\n kernalClose = np.ones((kernalCloseSize,kernalCloseSize))\n \n hl= cv2.getTrackbarPos('HUE LOW','test')\n sl= cv2.getTrackbarPos('SATURATION LOW','test')\n vl= cv2.getTrackbarPos('VALUE LOW','test')\n\n hh= cv2.getTrackbarPos('HUE HIGH','test')\n sh= cv2.getTrackbarPos('SATURATION HIGH','test')\n vh =cv2.getTrackbarPos('VALUE HIGH','test')\n threshold = cv2.getTrackbarPos('Threshold_below','test')\n tcovert = cv2.getTrackbarPos('threshold_To','test')\n \n lowerbound = np.array([hl,sl,vl])\n upperbound = np.array([hh,sh,vh])\n mask = cv2.inRange(hsv, lowerbound, upperbound)\n\n mask = cv2.erode(mask, None, iterations=2)\n\n mask = cv2.dilate(mask, None, iterations=2)\n cv2.imshow('mask3',mask)\n\n left_mask = mask[:,0:width//2,]\n right_mask = mask[:,width//2:,]\n\n cnts_left = cv2.findContours(left_mask.copy(), cv2.RETR_EXTERNAL,\n cv2.CHAIN_APPROX_SIMPLE)\n cnts_left = imutils.grab_contours(cnts_left)\n center_left = None\n\n cnts_right = cv2.findContours(right_mask.copy(), cv2.RETR_EXTERNAL,\n cv2.CHAIN_APPROX_SIMPLE)\n cnts_right = imutils.grab_contours(cnts_right)\n center_right = None\n \n\n if len(cnts_left) > 0: \n\n c = max(cnts_left, key=cv2.contourArea)\n ((x, y), radius) = cv2.minEnclosingCircle(c)\n M = cv2.moments(c)\n\n center_left = (int(M[\"m10\"] / (M[\"m00\"]+0.000001)), int(M[\"m01\"] / (M[\"m00\"]+0.000001)))\n \n\n if radius > circle_radius:\n\n cv2.circle(frame, (int(x), int(y)), int(radius),\n (0, 255, 255), 2)\n cv2.circle(frame, center_left, 5, (0, 0, 255), -1)\n\n \n if center_left[1] < (height/2 - windowSize//2):\n if prev ==None:\n prev = \"LEFT\" \n else:\n prev = now\n now = 'LEFT'\n if prev =='UP' and now =='LEFT':\n print('swipe LEFT')\n PressKey(A)\n ReleaseKey(A)\n cv2.putText(frame,'LEFT',(20,50),cv2.FONT_HERSHEY_SIMPLEX,1,(0,0,255))\n\n elif center_left[1] > (height/2 + windowSize//2):\n if prev ==None:\n prev = \"RIGHT\" \n else:\n prev = now\n now = 'RIGHT'\n if prev =='LEFT' and now =='RIGHT':\n print('swipe down')\n PressKey(D)\n PressKey(D)\n PressKey(D)\n ReleaseKey(D)\n\n cv2.putText(frame,'RIGHT',(20,50),cv2.FONT_HERSHEY_SIMPLEX,1,(0,0,255))\n \n \n\n if len(cnts_right) > 0:\n c2 = max(cnts_right, key=cv2.contourArea)\n ((x2, y2), radius2) = cv2.minEnclosingCircle(c2)\n M2 = cv2.moments(c2)\n center_right = (int(M2[\"m10\"] / (M2[\"m00\"]+0.000001)), int(M2[\"m01\"] / (M2[\"m00\"]+0.000001)))\n center_right = (center_right[0]+width//2,center_right[1])\n \n\n if radius2 > circle_radius:\n\n cv2.circle(frame, (int(x2)+width//2, int(y2)), int(radius2),\n (0, 255, 255), 2)\n cv2.circle(frame, center_right, 5, (0, 0, 255), -1)\n if center_right[1] < (height//2 - windowSize//2):\n if prev ==None:\n prev = \"UP\" \n else:\n prev = now\n now = 'UP'\n if prev =='RIGHT' and now =='UP':\n PressKey(P)\n ReleaseKey(P)\n if prev =='DOWN' and now =='UP':\n print('swipe up')\n PressKey(W)\n PressKey(W)\n PressKey(W)\n ReleaseKey(W)\n cv2.putText(frame,'UP',(200,50),cv2.FONT_HERSHEY_SIMPLEX,1,(0,0,255))\n \n elif center_right[1] > (height//2 + windowSize//2):\n if prev ==None:\n prev = \"DOWN\" \n else:\n prev = now\n now = 'DOWN'\n if prev =='RIGHT' and now =='DOWN':\n print('swipe right ')\n PressKey(S)\n PressKey(S)\n ReleaseKey(S)\n cv2.putText(frame,'DOWN',(200,50),cv2.FONT_HERSHEY_SIMPLEX,1,(0,0,255))\n \n\n frame_copy = frame.copy()\n frame_copy = cv2.rectangle(frame_copy,(0,height//2 - windowSize//2),(width,height//2 + windowSize//2),(255,0,0),2)\n cv2.imshow(\"Frame\", frame_copy)\n\n\n key = cv2.waitKey(1) & 0xFF\n if key == ord(\"q\"):\n break\n \n\nvs.stop() \ncv2.destroyAllWindows()"
},
{
"alpha_fraction": 0.5310291647911072,
"alphanum_fraction": 0.5869431495666504,
"avg_line_length": 37.70121765136719,
"blob_id": "d57443667691e3075b1f55fa3e7f006b61702c27",
"content_id": "6e36ef9d28121f18cc98b2416fb252c01af13fb9",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 6510,
"license_type": "no_license",
"max_line_length": 136,
"num_lines": 164,
"path": "/Mouse/mouse.py",
"repo_name": "allwin-baby/Gesture-Controlled-Pc",
"src_encoding": "UTF-8",
"text": "import cv2\r\nimport numpy as np\r\nfrom pynput.mouse import Button,Controller\r\nimport wx\r\nimport math\r\n\r\nmouse = Controller()\r\napp = wx.App(False)\r\n(sx,sy) = wx.DisplaySize() \r\n(camx,camy) = (640 ,480) \r\n\r\ncap = cv2.VideoCapture(0)\r\ncap.set(cv2.CAP_PROP_FPS, 30)\r\ncap.set(3,camx)\r\ncap.set(4,camy)\r\n\r\nmlocOld = np.array([0,0])\r\nmouseLoc = np.array([0,0])\r\ndampening_factor = 2\r\n\r\ndef nothing(x):\r\n pass\r\ncv2.namedWindow('test')\r\ncv2.resizeWindow('test',(560,560))\r\ncv2.createTrackbar('HUE LOW','test',0,179,nothing) # max value and initial pos \r\ncv2.createTrackbar('HUE HIGH','test',179,179,nothing)\r\ncv2.createTrackbar('SATURATION LOW','test',0,255,nothing)\r\ncv2.createTrackbar('SATURATION HIGH','test',255,255,nothing)\r\ncv2.createTrackbar('VALUE LOW','test',0,255,nothing)\r\ncv2.createTrackbar('VALUE HIGH','test',255,255,nothing) \r\ncv2.createTrackbar('KernalOpenSize','test',5,50,nothing) \r\ncv2.createTrackbar('KernalCloseSize','test',5,50,nothing)\r\ncv2.createTrackbar('clicklen','test',0,20,nothing) \r\ncv2.createTrackbar('Threshold below ','test',0,255,nothing)\r\ncv2.createTrackbar('threshold_To','test',255,255,nothing) \r\n\r\nglobal color_set\r\nglobal clicked\r\nclicked =False\r\ncolor_set = False\r\n\r\nwhile 1:\r\n global x1,x2,x3,y1,y2,y3\r\n suc,frame = cap.read()\r\n frame = cv2.flip(frame,1)\r\n hsv = cv2.cvtColor(frame,cv2.COLOR_BGR2HSV)\r\n\r\n\r\n if color_set ==False:\r\n kernalOpenSize= cv2. getTrackbarPos('KernalOpenSize','test')\r\n kernalCloseSize= cv2.getTrackbarPos('KernalCloseSize','test')\r\n kernalOpen = np.ones((kernalOpenSize,kernalOpenSize))\r\n kernalClose = np.ones((kernalCloseSize,kernalCloseSize))\r\n clicklen = cv2.getTrackbarPos('clicklen','test')\r\n \r\n hl= cv2.getTrackbarPos('HUE LOW','test')\r\n sl= cv2.getTrackbarPos('SATURATION LOW','test')\r\n vl= cv2.getTrackbarPos('VALUE LOW','test')\r\n\r\n hh= cv2.getTrackbarPos('HUE HIGH','test')\r\n sh= cv2.getTrackbarPos('SATURATION HIGH','test')\r\n vh =cv2.getTrackbarPos('VALUE HIGH','test')\r\n threshold = cv2.getTrackbarPos('Threshold_below','test')\r\n tcovert = cv2.getTrackbarPos('threshold_To','test')\r\n \r\n lowerbound = np.array([hl,sl,vl])\r\n upperbound = np.array([hh,sh,vh])\r\n \r\n mask = cv2.inRange(hsv,lowerbound,upperbound) \r\n mask = cv2.erode(mask, None, iterations=2)\r\n mask = cv2.dilate(mask, None, iterations=2)\r\n\r\n #grey = cv2.cvtColor(frame, cv2.COLOR_BGR2GRAY)\r\n #value = (35, 35)\r\n #blurred = cv2.GaussianBlur(grey, value, 0)\r\n\r\n\r\n maskOpen = cv2.morphologyEx(mask,cv2.MORPH_OPEN,kernalOpen)\r\n maskClose = cv2.morphologyEx(mask,cv2.MORPH_CLOSE,kernalClose)\r\n maskFinal = maskClose\r\n\r\n countours,hierarchy = cv2.findContours(maskFinal.copy(),cv2.RETR_EXTERNAL, cv2.CHAIN_APPROX_NONE)\r\n cv2.drawContours(frame,countours,-1,(255,255,255),3)\r\n else:\r\n kernalOpenSize= cv2.getTrackbarPos('KernalOpenSize','test')\r\n kernalCloseSize= cv2.getTrackbarPos('KernalCloseSize','test')\r\n kernalOpen = np.ones((kernalOpenSize,kernalOpenSize))\r\n kernalClose = np.ones((kernalCloseSize,kernalCloseSize))\r\n clicklen = cv2.getTrackbarPos('clicklen','test')\r\n\r\n hl= cv2.getTrackbarPos('HUE LOW','test')\r\n sl= cv2.getTrackbarPos('SATURATION LOW','test')\r\n vl= cv2.getTrackbarPos('VALUE LOW','test')\r\n\r\n hh= cv2.getTrackbarPos('HUE HIGH','test')\r\n sh= cv2.getTrackbarPos('SATURATION HIGH','test')\r\n vh =cv2.getTrackbarPos('VALUE HIGH','test')\r\n lowerbound = np.array([hl,sl,vl])\r\n upperbound = np.array([hh,sh,vh])\r\n lowerbound = np.array([hl,sl,vl])\r\n upperbound = np.array([hh,sh,vh])\r\n \r\n mask = cv2.inRange(hsv,lowerbound,upperbound) \r\n mask = cv2.erode(mask, None, iterations=5)\r\n mask = cv2.dilate(mask, None, iterations=5)\r\n cv2.imshow('mask3',mask)\r\n\r\n maskOpen = cv2.morphologyEx(mask,cv2.MORPH_OPEN,kernalOpen)\r\n maskClose = cv2.morphologyEx(mask,cv2.MORPH_CLOSE,kernalClose)\r\n maskFinal = maskClose\r\n\r\n countours,hierarchy = cv2.findContours(maskFinal.copy(),cv2.RETR_EXTERNAL, cv2.CHAIN_APPROX_NONE)\r\n cv2.drawContours(frame,countours,-1,(255,255,255),3)\r\n\r\n if len(countours) ==2:\r\n if cv2.contourArea(countours[0]) > 50 and cv2.contourArea(countours[1]) > 50 : \r\n x1,y1,w1,h1 = cv2.boundingRect(countours[0])\r\n x2,y2,w2,h2 = cv2.boundingRect(countours[1])\r\n cv2.rectangle(frame,(x1,y1),(x1+w1,y1+h1),(0,255,0),2)\r\n cv2.rectangle(frame,(x2,y2),(x2+w2,y2+h2),(0,255,0),2)\r\n cx1 = x1+w1/2\r\n cy1 = y1+h1/2\r\n cx2 = x2+ w2/2\r\n cy2 = y2+h2/2\r\n cx = (cx1+cx2)/2\r\n cy = (cy1+cy2)/2\r\n\r\n\r\n mouse.position = (cx*sx/camx,cy*sy/camy)\r\n frame = cv2.putText(frame, str(str(cx1) +\" \"+str(cy1)), (int(cx1),int(cy1)), cv2.FONT_HERSHEY_SIMPLEX ,1,(255,255,0),2) \r\n frame = cv2.putText(frame, str(str(cx2)+\" \"+str(cy2)), (int(cx2),int(cy2)), cv2.FONT_HERSHEY_SIMPLEX ,1,(255,255,0),2) \r\n cv2.line(frame,(int(cx1),int(cy1)),(int(cx2),int(cy2)),(255,255,255),2)\r\n\r\n\r\n cv2.putText(frame,str(math.sqrt(abs(cx1-cx2)+abs( cy1-cy2))), (50,50,),cv2.FONT_HERSHEY_SIMPLEX ,1,(255,255,0),2) \r\n if clicked ==True:\r\n print('relasesd')\r\n mouse.release(Button.left)\r\n clicked = False\r\n elif len(countours) == 1:\r\n if (cv2.contourArea(countours[0])) >300:\r\n x1,y1,w1,h1 = cv2.boundingRect(countours[0])\r\n cv2.rectangle(frame,(x1,y1),(x1+w1,y1+h1),(0,255,0),2)\r\n cx = x1+w1/2\r\n cy = y1+h1/2\r\n\r\n mouse.position = (cx*sx/camx,cy*sy/camy)\r\n cv2.circle(frame,(int(cx),int(cy)),2,(0,255,0),2)\r\n\r\n if clicked ==False:\r\n print('clicked')\r\n mouse.press(Button.left)\r\n clicked = True\r\n else:\r\n pass\r\n k = cv2.waitKey(2)\r\n if k==ord('s'):\r\n color_set =True\r\n cv2.imshow('frame',frame)\r\n half = cv2.resize(frame, (0, 0), fx = 0.5, fy = 0.5) \r\n cv2.imshow('testtt',half)\r\n if cv2.waitKey(1) == ord('a'):\r\n break\r\ncv2.destroyAllWindows() "
},
{
"alpha_fraction": 0.516853928565979,
"alphanum_fraction": 0.5813046097755432,
"avg_line_length": 32.09574508666992,
"blob_id": "f41fd1400386f63eca99f9337b7e5d5ad39defb4",
"content_id": "8df837ba3074007048c2532498c54238e05219ac",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 6408,
"license_type": "no_license",
"max_line_length": 107,
"num_lines": 188,
"path": "/RaceWheel/angle.py",
"repo_name": "allwin-baby/Gesture-Controlled-Pc",
"src_encoding": "UTF-8",
"text": "import cv2\r\nimport numpy as np\r\nimport time\r\nfrom directkeys import Left,Right,Down,Up\r\nfrom directkeys import PressKey, ReleaseKey \r\n\r\ncap = cv2.VideoCapture(0)\r\ncap.set(cv2.CAP_PROP_FPS, 1)\r\n(camx,camy) = (640,480)\r\nfps = cap.get(cv2.CAP_PROP_FPS)\r\nprint(fps)\r\ncap.set(3,camx)\r\ncap.set(4,camy)\r\nprint(cap.get(3))\r\nprint(cap.get(4))\r\n\r\n\r\ndef nothing(x):\r\n pass\r\ncv2.namedWindow('test')\r\ncv2.resizeWindow('test',(560,560))\r\ncv2.createTrackbar('HUE LOW','test',0,179,nothing) # max value and initial pos \r\ncv2.createTrackbar('HUE HIGH','test',179,179,nothing)\r\ncv2.createTrackbar('SATURATION LOW','test',0,255,nothing)\r\ncv2.createTrackbar('SATURATION HIGH','test',255,255,nothing)\r\ncv2.createTrackbar('VALUE LOW','test',0,255,nothing)\r\ncv2.createTrackbar('VALUE HIGH','test',255,255,nothing) \r\ncv2.createTrackbar('Threshold below ','test',0,255,nothing)\r\ncv2.createTrackbar('threshold_To','test',255,255,nothing) \r\ncv2.createTrackbar('KernalOpenSize','test',5,50,nothing) \r\ncv2.createTrackbar('KernalCloseSize','test',5,50,nothing) \r\ncv2.createTrackbar('waitkey','test',1,1000,nothing) \r\n\r\n\r\nglobal blueclicked \r\nblueclicked =False\r\nglobal greenclicked \r\ngreenclicked =False\r\n\r\n\r\nwhile 1:\r\n global bx1,bx2,bx3,by1,by2,by3,gx1,gx2,gx3,gy1,gy2,gy3,bko,bkc,gko,gkc\r\n suc,frame = cap.read()\r\n frame = cv2.flip(frame,1)\r\n hsv = cv2.cvtColor(frame,cv2.COLOR_BGR2HSV)\r\n wait_key= cv2.getTrackbarPos('waitkey','test') \r\n if blueclicked ==False or greenclicked ==False:\r\n\r\n kernalOpenSize= cv2.getTrackbarPos('KernalOpenSize','test')\r\n kernalCloseSize= cv2.getTrackbarPos('KernalCloseSize','test')\r\n kernalOpen = np.ones((kernalOpenSize,kernalOpenSize))\r\n kernalClose = np.ones((kernalCloseSize,kernalCloseSize))\r\n\r\n\r\n hl= cv2.getTrackbarPos('HUE LOW','test')\r\n sl= cv2.getTrackbarPos('SATURATION LOW','test')\r\n vl= cv2.getTrackbarPos('VALUE LOW','test')\r\n\r\n hh= cv2.getTrackbarPos('HUE HIGH','test')\r\n sh= cv2.getTrackbarPos('SATURATION HIGH','test')\r\n vh =cv2.getTrackbarPos('VALUE HIGH','test')\r\n lowerbound = np.array([hl,sl,vl])\r\n upperbound = np.array([hh,sh,vh])\r\n \r\n\r\n\r\n mask = cv2.inRange(hsv,lowerbound,upperbound) \r\n bitwise = cv2.bitwise_and(frame,frame ,mask = mask)\r\n mask2 = cv2.cvtColor(mask, cv2.COLOR_GRAY2BGR)\r\n\r\n\r\n maskOpen = cv2.morphologyEx(mask,cv2.MORPH_OPEN,kernalOpen) \r\n maskClose = cv2.morphologyEx(mask,cv2.MORPH_CLOSE,kernalClose)\r\n maskFinal = maskClose\r\n\r\n\r\n countours,hierarchy = cv2.findContours(maskFinal.copy(),cv2.RETR_EXTERNAL, cv2.CHAIN_APPROX_NONE)\r\n cv2.drawContours(frame,countours,-1,(255,255,255),3)\r\n if blueclicked ==True and greenclicked ==True: \r\n \r\n blowerbound = np.array([bx1,bx2,bx3])\r\n bupperbound = np.array([by1,by2,by3])\r\n #blowerbound = np.array([97, 122 ,116])\r\n #bupperbound = np.array([119, 255, 255])\r\n\r\n\r\n bmask = cv2.inRange(hsv,blowerbound,bupperbound) \r\n bmask2 = cv2.cvtColor(bmask, cv2.COLOR_GRAY2BGR)\r\n \r\n\r\n bmaskOpen = cv2.morphologyEx(bmask,cv2.MORPH_OPEN,bko) \r\n bmaskClose = cv2.morphologyEx(bmask,cv2.MORPH_CLOSE,bkc)\r\n bmaskFinal = bmaskClose\r\n #contours\r\n bcountours,hierarchy = cv2.findContours(bmaskFinal.copy(),cv2.RETR_EXTERNAL, cv2.CHAIN_APPROX_NONE)\r\n cv2.drawContours(frame,bcountours,-1,(255,255,255),3)\r\n\r\n glowerbound = np.array([gx1,gx2,gx3])\r\n gupperbound = np.array([gy1,gy2,gy3])\r\n #glowerbound = np.array([30, 111 ,48])\r\n #gupperbound = np.array([81, 158 ,111])\r\n\r\n gmask = cv2.inRange(hsv,glowerbound,gupperbound) \r\n gmask2 = cv2.cvtColor(gmask, cv2.COLOR_GRAY2BGR)\r\n\r\n\r\n gmaskOpen = cv2.morphologyEx(gmask,cv2.MORPH_OPEN,gko) \r\n gmaskClose = cv2.morphologyEx(gmask,cv2.MORPH_CLOSE,gkc)\r\n gmaskFinal = gmaskClose\r\n\r\n gcountours,hierarchy = cv2.findContours(gmaskFinal.copy(),cv2.RETR_EXTERNAL, cv2.CHAIN_APPROX_NONE)\r\n cv2.drawContours(frame,gcountours,-1,(255,255,255),3)\r\n if len(gcountours) ==1 and len(bcountours) ==1:\r\n x1,y1,w1,h1 = cv2.boundingRect(gcountours[0])\r\n x2,y2,w2,h2 = cv2.boundingRect(bcountours[0])\r\n cv2.rectangle(frame,(x1,y1),(x1+w1,y1+h1),(0,255,0),2)\r\n cv2.rectangle(frame,(x2,y2),(x2+w2,y2+h2),(0,255,0),2)\r\n cx1 = int(x1+w1/2)\r\n cy1 = int(y1+h1/2)\r\n cx2 = int(x2+ w2/2)\r\n cy2 = int(y2+h2/2)\r\n cv2.line(frame,(cx1,cy1),(cx2,cy2),(255,255,255),2)\r\n print(cx1,cx2)\r\n import math\r\n PI =3.14159265\r\n if cx2 -cx1>=0:\r\n ReleaseKey(Down)\r\n PressKey(Up)\r\n else: \r\n ReleaseKey(Up)\r\n PressKey(Down)\r\n try:\r\n m1 = (cy2 - cy1) / abs(cx2 - cx1) \r\n A = math.atan(m1) * 180 / PI\r\n except:\r\n A= 0\r\n if A < -10:\r\n \r\n ReleaseKey(Right)\r\n PressKey(Left)\r\n\r\n if 10>A>-10:\r\n \r\n ReleaseKey(Right)\r\n ReleaseKey(Left)\r\n\r\n if A > 10:\r\n \r\n ReleaseKey(Left)\r\n PressKey(Right)\r\n\r\n \r\n frame = cv2.putText(frame, str(A), (20,20), cv2.FONT_HERSHEY_SIMPLEX ,1,(255,255,0),2) \r\n else:\r\n ReleaseKey(Left)\r\n ReleaseKey(Right) \r\n\r\n cv2.line(frame,(0,340),(640,340),(255,255,255),2)\r\n\r\n cv2.imshow('frame',frame)\r\n if cv2.waitKey(1) == ord('b'):\r\n print('b pressed')\r\n bx1,bx2,bx3 = hl,sl,vl\r\n by1,by2,by3 = hh,sh,vh\r\n bko= kernalOpen\r\n bkc = kernalClose\r\n blueclicked = True\r\n print(bx1,bx2,bx3)\r\n print(by1,by2,by3)\r\n print(bko,bkc)\r\n if cv2.waitKey(1) == ord('g'):\r\n print('g pressed')\r\n gko =kernalOpen\r\n gkc = kernalClose\r\n gx1,gx2,gx3 = hl,sl,vl\r\n gy1,gy2,gy3 = hh,sh,vh\r\n print(gx1,gx2,gx3)\r\n print(gy1,gy2,gy3)\r\n print(gko,gkc)\r\n greenclicked = True\r\n if cv2.waitKey(1) == ord('q'):\r\n break \r\n \r\ncv2.destroyAllWindows() \r\nprint(bx1,bx2,bx3)\r\nprint(by1,by2,by3) \r\nprint(gx1,gx2,gx3)\r\nprint(gy1,gy2,gy3)"
}
] | 4 |
jsolbrig/cookiecutter-satpy
|
https://github.com/jsolbrig/cookiecutter-satpy
|
56c35ff00062c4ac50e051828cd55d351fb051ee
|
102ee3b9a4abdf6d5424235cf740df01be5de74f
|
829a683a74740e3e9e9b1ee2369fe22b7f0f7e86
|
refs/heads/master
| 2020-05-26T04:52:29.959368 | 2019-05-28T20:40:08 | 2019-05-28T20:40:08 | 188,111,946 | 0 | 0 | null | null | null | null | null |
[
{
"alpha_fraction": 0.6326062083244324,
"alphanum_fraction": 0.6326062083244324,
"avg_line_length": 27.09677505493164,
"blob_id": "8860f6418aca12df3a3475bdc23d83c93bc97b1f",
"content_id": "12fb35c6be294e8d083e5f730e984e6d836f23f4",
"detected_licenses": [
"BSD-3-Clause"
],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 871,
"license_type": "permissive",
"max_line_length": 79,
"num_lines": 31,
"path": "/hooks/post_gen_project.py",
"repo_name": "jsolbrig/cookiecutter-satpy",
"src_encoding": "UTF-8",
"text": "#!/usr/bin/env python\nimport os\nfrom glob import glob\n\nPROJECT_DIRECTORY = os.path.realpath(os.path.curdir)\n\n\ndef remove_file(filepath):\n os.remove(os.path.join(PROJECT_DIRECTORY, filepath))\n\n\ndef remove_directory(dirpath):\n for fname in glob('{}/*'.format(os.path.join(PROJECT_DIRECTORY, dirpath))):\n remove_file(fname)\n os.rmdir(os.path.join(PROJECT_DIRECTORY, dirpath))\n\n\nif __name__ == '__main__':\n\n if '{{ cookiecutter.create_author_file }}' != 'y':\n remove_file('AUTHORS.rst')\n remove_file('docs/authors.rst')\n\n if '{{ cookiecutter.use_pytest }}' == 'y':\n remove_file('tests/__init__.py')\n\n # if '{{ cookiecutter.make_satpy_composite }}' != 'y':\n # remove_directory('{{cookiecutter.project_slug}}/composites')\n\n if 'Not open source' == '{{ cookiecutter.open_source_license }}':\n remove_file('LICENSE')\n"
}
] | 1 |
LeonMehl/SpaceInvaders
|
https://github.com/LeonMehl/SpaceInvaders
|
a8ae61ac7606562f3f9d066b6ca67bbaf58e015c
|
9dd154d6a658654ae11e92060d0dbede09110694
|
c76921c9afbfbb933cba0a2d714692297021caa7
|
refs/heads/main
| 2023-02-16T13:58:37.357924 | 2021-01-19T07:14:35 | 2021-01-19T07:14:35 | 304,568,220 | 0 | 1 | null | 2020-10-16T08:37:33 | 2020-12-02T14:25:21 | 2020-12-02T15:23:29 |
Jupyter Notebook
|
[
{
"alpha_fraction": 0.5411785244941711,
"alphanum_fraction": 0.56275874376297,
"avg_line_length": 31.440000534057617,
"blob_id": "d0a350c43bcf083627e6b3416eb57829faeb576d",
"content_id": "e49a5476fa3f52f56451f650c213e895617a0d53",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 11359,
"license_type": "no_license",
"max_line_length": 174,
"num_lines": 350,
"path": "/SpaceInvaders.py",
"repo_name": "LeonMehl/SpaceInvaders",
"src_encoding": "UTF-8",
"text": "import pygame\nimport random\nimport numpy as np\nimport cv2\nimport os.path\nimport warnings\nwarnings.simplefilter(action='ignore', category=FutureWarning)\nfrom keras.models import Model, load_model\nfrom keras.layers import Input, BatchNormalization, Activation, Dense, Dropout, Flatten, ZeroPadding2D, UpSampling2D\nfrom keras.layers.core import Lambda, RepeatVector, Reshape\nfrom keras.layers.convolutional import Conv2D, Conv2DTranspose\nfrom keras.layers.pooling import MaxPooling2D, GlobalMaxPool2D\nfrom keras.layers.merge import concatenate, add\nfrom keras.callbacks import EarlyStopping, ModelCheckpoint, ReduceLROnPlateau\nfrom keras.optimizers import Adam\nfrom keras.preprocessing.image import ImageDataGenerator, array_to_img, img_to_array, load_img\nimport tensorflow as tf\nfrom tensorflow.keras import layers\nfrom tensorflow import keras\nimport pandas as pd \nimport re\nimport matplotlib.pyplot as plt\nfrom datetime import datetime\nfrom scipy.signal import savgol_filter\nimport math\n\npathname = r\"D:\\OneDrive - Hochschule Albstadt-Sigmaringen\\Studium\\Semester 5\\DesignCPS\"\ndatadirname = \"data\"\ntestdirname = \"test\"\nvaliddirname = \"valid\"\nmodeldirname = \"model\"\ndatacsvname = \"data.csv\"\nmodeljsonname=\"model-regr.json\"\nmodelweightname=\"model-regr.h5\"\ndim = (50,50) \nactionstonum = {\"RIGHT\": 0,\n \"LEFT\": 1,\n \"SPACE\" : 2,\n }\nnumtoactions = {0: \"RIGHT\",\n 1: \"LEFT\",\n 2: \"SPACE\",\n }\n\ndef create_q_model():\n # Network defined by the Deepmind paper\n inputs = layers.Input(shape=(dim[0], dim[1], 3,))\n\n # Convolutions on the frames on the screen\n layer1 = layers.Conv2D(32, 8, strides=4, activation=\"relu\")(inputs)\n layer2 = layers.Conv2D(64, 4, strides=2, activation=\"relu\")(layer1)\n layer3 = layers.Conv2D(64, 3, strides=1, activation=\"relu\")(layer2)\n\n layer4 = layers.Flatten()(layer3)\n\n layer5 = layers.Dense(512, activation=\"relu\")(layer4)\n action = layers.Dense(4, activation=\"linear\")(layer5)\n\n return keras.Model(inputs=inputs, outputs=action)\n\ndef run_game(learning_rate = 1.5e-06, epochs = 5, benchmin = 68.0):\n manual = False\n lr = [learning_rate for i in range(epochs)]\n\n iterations = len(lr)\n benches = []\n qms = []\n qps = []\n counter = 0\n\n for i in range(iterations):\n print(f\"{i}: learning rate: {lr[i]}\")\n print(benchmin)\n game = Game(lr[i], \"model-regr.h5\")\n k = 150 #40\n game.load_replay_memory()\n for j in range(k):\n game.initialize(i, j)\n game.run(j)\n bench, qm, qp = game.print_benchmark()\n benches.append(bench)\n qms.append(qm)\n qps.append(qp)\n game.save_replay_memory()\n game.save_checkpoint(f\"model-regr_{i}_{lr[i]:.9f}_{bench:.2f}.h5\")\n if bench < benchmin:\n benchmin = bench\n game.save_checkpoint()\n else:\n counter += 1\n if counter == 3:\n counter = 0\n lr *= 0.5 \n \n overallscore = game.print_overall_score()\n overallscores.append(overallscore)\n return benches, qms, qps\n\nmodel = create_q_model()\nmodel_json = model.to_json()\nwith open(os.path.join(pathname, modeldirname,modeljsonname), \"w\") as json_file:\n json_file.write(model_json)\nmodel.save_weights(os.path.join(pathname, modeldirname,modelweightname))\n\n\n\n\nclass Game:\n screen = None\n aliens = []\n rockets = []\n lost = False\n\n def __init__(self, width, height, lr=1e-3, checkpointparname=\"model-regr.h5\"):\n pygame.init()\n self.width = width\n self.height = height\n self.screen = pygame.display.set_mode((width, height))\n self.clock = pygame.time.Clock()\n\n self.imgresh1 = None\n self.imgresh2 = None\n\n self.reward = 0\n self.MAXREWARD = 1.0\n self.PENALTY = -1.0\n self.MOVEPENALTY = 0.0\n \n self.BATCHSIZE = 19\n self.DISCOUNT = 0.99\n self.ALPHA = 0.3\n \n manual=False\n if manual == True:\n self.EPSILON = 0.999\n else:\n self.EPSILON = 0.3\n \n self.REPLAYSIZE = 40_000\n self.overall_score = 0\n self.overall_numbatches = 0\n self.overall_accumulatedstates = np.array([0.0,0.0,0.0,0.0])\n \n \n self.path = os.path.join(pathname, datadirname)\n self.modelpath = os.path.join(pathname, modeldirname)\n \n self.filename = \"data.csv\"\n \n self.model = create_q_model()\n self.model_target = create_q_model()\n\n self.learningrate = lr\n self.optimizer = keras.optimizers.Adam(learning_rate=self.learningrate, clipnorm=1.0)\n self.loss_function = keras.losses.Huber()\n\n self.checkpointname = os.path.join(pathname, modeldirname,checkpointparname)\n print(f\"loading checkpoint: {self.checkpointname}\")\n self.model_target.load_weights(self.checkpointname)\n \n self.overall_scores=[]\n self.checkpoint_counter=0\n \n self.shufflelist = []\n self.debugcounter = 0\n\n done = False\n\n hero = Hero(self, width / 2, height - 20)\n generator = Generator(self)\n rocket = None\n\n def run(self, i_index):\n i = i_index + self.get_maxi() + 1\n j = 0\n while not done:\n img1 = np.frombuffer(pygame.image.tostring(self.screen, \"RGB\"), dtype=np.uint8)\n self.imgresh1 = np.reshape(img1,(self.width,self.height, 3))\n self.imgresh1 = cv2.resize(self.imgresh1, dim, interpolation = cv2.INTER_NEAREST )\n\n current_state = np.array(self.imgresh1, dtype=np.float32)/255.0\n \n #if len(self.aliens) == 0:\n # self.displayText(\"WIN\")\n\n pressed = pygame.key.get_pressed()\n if pressed[pygame.K_LEFT]: # sipka doleva\n hero.x -= 2 if hero.x > 20 else 0 # leva hranice plochy\n elif pressed[pygame.K_RIGHT]: # sipka doprava\n hero.x += 2 if hero.x < width - 20 else 0 # prava hranice\n elif pressed[pygame.K_q]:\n pygame.quit()\n\n for event in pygame.event.get():\n if event.type == pygame.QUIT:\n done = True\n if event.type == pygame.KEYDOWN and event.key == pygame.K_SPACE and not self.lost:\n self.rockets.append(Rocket(self, hero.x, hero.y))\n\n pygame.display.flip()\n self.clock.tick(60)\n self.screen.fill((0, 0, 0))\n\n for alien in self.aliens:\n alien.draw()\n alien.checkCollision(self)\n if (alien.y > height):\n self.lost = True\n #self.displayText(\"YOU DIED\")\n\n for rocket in self.rockets:\n rocket.draw()\n\n if not self.lost: hero.draw()\n\n self.write(i,j)\n\n j+=1\n\n def write(self, i, j): \n\n cv2.imwrite(os.path.join(self.path,\"current_{}_{}.png\".format(i,j)), self.imgresh1)\n cv2.imwrite(os.path.join(self.path,\"next_{}_{}.png\".format(i,j)), self.imgresh2)\n\n def train(self, i, j, term):\n \n # https://pythonprogramming.net/training-deep-q-learning-dqn-reinforcement-learning-python-tutorial/\n \n currentstate = \"current_{}_{}.png\".format(i,j)\n\n nextstate = \"next_{}_{}.png\".format(i,j) \n \n batch, files = self.pop_batch(self.BATCHSIZE)\n \n assert(self.imgresh1.shape == (dim[0], dim[1],3))\n assert(self.imgresh2.shape == (dim[0], dim[1],3))\n \n batch.append([self.imgresh1, actionstonum[self.changeto], self.reward, self.imgresh2, term, self.snake_pos[0], self.snake_pos[1], self.food_pos[0], self.food_pos[1]])\n files.append((\"current_{}_{}.png\".format(i,j), \"next_{}_{}.png\".format(i,j)))\n \n self.write(i,j)\n \n self.backprop(batch)\n \n self.numbatches += 1\n \n self.push_batch(batch, files) \n \n return \n \n def load_replay_memory(self):\n\n f = open(os.path.join(os.path.join(self.path,datacsvname)), \"r\")\n \n df = pd.read_csv(f, index_col = 0) \n\n for index, row in df.iterrows():\n\n currentpicname = row[\"currentstate\"]\n action = actionstonum[row[\"action\"]]\n reward = row[\"reward\"]\n nextpicname = row[\"nextstate\"]\n terminated = row[\"terminated\"]\n\n assert os.path.isfile(os.path.join(self.path,currentpicname)) == True\n assert (action < 5 and action >= 0)\n assert isinstance(reward,int) or isinstance(reward, float)\n assert os.path.isfile(os.path.join(self.path,nextpicname)) == True\n \n self.shufflelist.append([currentpicname,action,reward,nextpicname, terminated])\n\n random.shuffle(self.shufflelist)\n\n #print(f\"loading: size of replay memory {len(self.shufflelist)}\")\n \n f.close()\n \n return\n\n def displayText(self, text):\n pygame.font.init()\n font = pygame.font.SysFont('Arial', 50)\n textsurface = font.render(text, False, (44, 0, 62))\n self.screen.blit(textsurface, (110, 160))\n\n\nclass Alien:\n def __init__(self, game, x, y):\n self.x = x\n self.game = game\n self.y = y\n self.size = 40\n\n def draw(self):\n pygame.draw.rect(self.game.screen, # renderovací plocha\n (81, 43, 88), # barva objektu\n pygame.Rect(self.x, self.y, self.size, self.size))\n self.y += 0.4\n\n def checkCollision(self, game):\n for rocket in game.rockets:\n if (rocket.x < self.x + self.size and\n rocket.x > self.x - self.size and\n rocket.y < self.y + self.size and\n rocket.y > self.y - self.size):\n game.rockets.remove(rocket)\n game.aliens.remove(self)\n\n\nclass Hero:\n def __init__(self, game, x, y):\n self.x = x\n self.game = game\n self.y = y\n\n def draw(self):\n pygame.draw.rect(self.game.screen,\n (210, 250, 251),\n pygame.Rect(self.x, self.y, 40, 20))\n\n\nclass Generator:\n def __init__(self, game):\n margin = 30 # mezera od okraju obrazovky\n width = 50 # mezera mezi alieny\n for x in range(margin, game.width - margin, width):\n for y in range(margin, int(game.height / 2), width):\n if(random.randint(0,1)==1):\n game.aliens.append(Alien(game, x, y))\n \n \n\n # game.aliens.append(Alien(game, 280, 50))\n\n\nclass Rocket:\n def __init__(self, game, x, y):\n self.x = x\n self.y = y\n self.game = game\n\n def draw(self):\n pygame.draw.rect(self.game.screen, # renderovací plocha\n (254, 52, 110), # barva objektu\n pygame.Rect(self.x, self.y, 15, 15))\n self.y -= 2 # poletí po herní ploše nahoru 2px/snímek\n\n\nif __name__ == '__main__':\n game = Game(500, 500)"
}
] | 1 |
rikbruil/pyredux
|
https://github.com/rikbruil/pyredux
|
8e6e5b4a12dfb534eda96a8f611b1191a872acec
|
eed77ef94563f44da13f4cb8fce2ba72bb66a633
|
d767e7a0581ce7f09a017a105791a9ff3e562ec1
|
refs/heads/master
| 2021-01-10T05:11:52.062826 | 2016-01-23T16:55:34 | 2016-01-23T16:55:53 | 50,203,406 | 1 | 0 | null | null | null | null | null |
[
{
"alpha_fraction": 0.5882353186607361,
"alphanum_fraction": 0.5899159908294678,
"avg_line_length": 20.25,
"blob_id": "e66a3c05c2da13af26442a089f4351c77024f8ed",
"content_id": "6ddf4a9de943845092257816f97a75569b81c183",
"detected_licenses": [
"MIT"
],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 595,
"license_type": "permissive",
"max_line_length": 54,
"num_lines": 28,
"path": "/pyredux/internal/store.py",
"repo_name": "rikbruil/pyredux",
"src_encoding": "UTF-8",
"text": "# -*- coding: utf-8 -*-\n\n\nclass BaseStore:\n _store = None\n\n def __init__(self, store):\n self._store = store\n\n def dispatch(self, action):\n return self._store['dispatch'](action)\n\n def get_state(self):\n return self._store['get_state']()\n\n\nclass Store(BaseStore):\n _store = None\n\n def __init__(self, store):\n self._store = store\n BaseStore.__init__(self, store)\n\n def subscribe(self, listener):\n return self._store['subscribe'](listener)\n\n def replace_reducer(self, reducer):\n return self._store['replace_reducer'](reducer)\n"
},
{
"alpha_fraction": 0.5720476508140564,
"alphanum_fraction": 0.5763813853263855,
"avg_line_length": 19.977272033691406,
"blob_id": "a063e159d2521fc39f33e71aec08986d8b797d9f",
"content_id": "a026220acb73eeed63a6cf6f1cafe2f2cb550bad",
"detected_licenses": [
"MIT"
],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 923,
"license_type": "permissive",
"max_line_length": 75,
"num_lines": 44,
"path": "/pyredux/__init__.py",
"repo_name": "rikbruil/pyredux",
"src_encoding": "UTF-8",
"text": "# -*- coding: utf-8 -*-\n\"\"\"Pyredux module\n\nThis module is a port of the JavaScript Redux project.\n\"\"\"\n\nfrom functools import reduce\n\n\ndef compose(*funcs):\n \"\"\"\n Compose two or more callables together.\n\n Args:\n *funcs: List of callables to be composed\n\n Returns:\n Newly created callable\n \"\"\"\n\n def wrapped(*args):\n \"\"\"\n Newly created function which wraps the originally passed functions.\n\n Args:\n *args: The original arguments the functions would receive.\n\n Returns:\n Depends on the originally passed functions if this would return\n anything or not.\n \"\"\"\n if not len(funcs):\n return args[0]\n\n last = funcs[-1]\n rest = funcs[:-1]\n\n return reduce(lambda composed, f: f(composed), reversed(rest),\n last(*args))\n\n return wrapped\n\n\n__all__ = [\"store\", \"middleware\"]\n"
},
{
"alpha_fraction": 0.7790697813034058,
"alphanum_fraction": 0.7790697813034058,
"avg_line_length": 28,
"blob_id": "caf750f9a86730c7561fc042bb401a61f4741ad7",
"content_id": "204d49c2d596066027d15f399d654175b213ec59",
"detected_licenses": [
"MIT"
],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 86,
"license_type": "permissive",
"max_line_length": 47,
"num_lines": 3,
"path": "/pyredux/__init__.pyi",
"repo_name": "rikbruil/pyredux",
"src_encoding": "UTF-8",
"text": "from typing import Iterable, Callable\n\ndef compose(*funcs: Callable) -> Callable: pass"
},
{
"alpha_fraction": 0.5954285860061646,
"alphanum_fraction": 0.6000000238418579,
"avg_line_length": 23.30555534362793,
"blob_id": "e3822b27e7998bcf34a22a7ffb917f251e20415c",
"content_id": "0f415f6cb421137da0459a2c40e4903bddedf58e",
"detected_licenses": [
"MIT"
],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 875,
"license_type": "permissive",
"max_line_length": 79,
"num_lines": 36,
"path": "/setup.py",
"repo_name": "rikbruil/pyredux",
"src_encoding": "UTF-8",
"text": "import unittest\nimport sys\nfrom setuptools import setup, Command\n\n\nclass RunTests(Command):\n user_options = []\n\n def initialize_options(self):\n pass\n\n def finalize_options(self):\n pass\n\n @staticmethod\n def run():\n loader = unittest.TestLoader()\n tests = loader.discover('test', pattern='*_test.py', top_level_dir='.')\n runner = unittest.TextTestRunner()\n results = runner.run(tests)\n sys.exit(0 if results.wasSuccessful() else 1)\n\n\nsetup(name='pyredux',\n version='0.1',\n description='Port of the Redux library for Python and for fun',\n url='http://github.com/rikbruil/pyredux',\n author='Rik Bruil',\n author_email='[email protected]',\n license='MIT',\n packages=['pyredux'],\n install_requires=[\n # 'markdown',\n ],\n zip_safe=False,\n cmdclass={'test': RunTests},)\n"
},
{
"alpha_fraction": 0.5556191205978394,
"alphanum_fraction": 0.5559050440788269,
"avg_line_length": 26.3203125,
"blob_id": "3852ed7fd1491cd530a728266d40c4dbacda14fd",
"content_id": "13421372ad8e993c074ab0e95dd0614e9b3bf246",
"detected_licenses": [
"MIT"
],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 3497,
"license_type": "permissive",
"max_line_length": 76,
"num_lines": 128,
"path": "/pyredux/store.py",
"repo_name": "rikbruil/pyredux",
"src_encoding": "UTF-8",
"text": "# -*- coding: utf-8 -*-\n\nfrom pyredux.internal.store import Store\nfrom pyredux.internal.params import __INIT\n\n\ndef create(reducer, initial_state):\n \"\"\"\n Create a new store to contain state. Only one should be used per\n application.\n\n Args:\n reducer: A callable that will accept two arguments: state and action\n initial_state: The initial state for this store. Can be any type.\n\n Returns:\n A store object with dispatch, get_state, replace_reducer\n and subscribe functions.\n \"\"\"\n if not callable(reducer):\n raise TypeError\n\n _state = {\"current_reducer\": reducer,\n \"current_state\": initial_state,\n \"listeners\": [],\n \"is_dispatching\": False}\n\n def get_state():\n \"\"\"\n Get the current state from the store.\n\n Returns:\n The current state of the store\n \"\"\"\n return _state[\"current_state\"]\n\n def subscribe(listener):\n \"\"\"\n\n Args:\n listener: Callable which will accept two callables\n (dispatch, get_state). These can be used to dispatch new\n actions and retrieve the current state.\n\n Returns:\n callable: An unsubscribe function to unsubscribe\n the given listener.\n \"\"\"\n _state[\"listeners\"].append(listener)\n\n state = {\"is_subscribed\": True}\n\n def unsubscribe():\n \"\"\"\n Un-subscibe the previously subscribed listener.\n This function can be called multiple times, but will only\n return False when any action was taken.\n\n Returns:\n bool: True or False depending on if any action was taken.\n \"\"\"\n\n if not state[\"is_subscribed\"]:\n return False\n\n state[\"is_subscribed\"] = False\n\n try:\n _state[\"listeners\"].remove(listener)\n except ValueError:\n return False\n\n return True\n\n return unsubscribe\n\n def dispatch(action):\n \"\"\"\n Dispatch the given action by calling the reducer and firing the\n listeners when complete.\n\n Args:\n action: A dict(-like) with a type key.\n Can contain more keys, but type is required.\n\n Returns:\n The action that was passed to the dispatcher\n \"\"\"\n\n if not isinstance(action, dict):\n raise TypeError\n\n if not action.get('type'):\n raise TypeError\n\n if _state[\"is_dispatching\"]:\n raise RuntimeError\n\n try:\n state = _state['current_state']\n _reducer = _state['current_reducer']\n\n _state[\"is_dispatching\"] = True\n _state[\"current_state\"] = _reducer(state, action)\n finally:\n _state[\"is_dispatching\"] = False\n\n for listener in _state[\"listeners\"]:\n listener(dispatch, get_state)\n\n return action\n\n def replace_reducer(next_reducer):\n \"\"\"\n Replace the current reducer used by the store with a different one\n\n Args:\n next_reducer: The reducer to replace the current reducer with\n \"\"\"\n _state[\"current_reducer\"] = next_reducer\n dispatch({\"type\": __INIT})\n\n dispatch({\"type\": __INIT})\n\n return Store({\"dispatch\": dispatch,\n \"get_state\": get_state,\n \"replace_reducer\": replace_reducer,\n \"subscribe\": subscribe})\n"
},
{
"alpha_fraction": 0.691428542137146,
"alphanum_fraction": 0.691428542137146,
"avg_line_length": 24,
"blob_id": "30f65938e6f6d465da1ecbef81ba3d853c235854",
"content_id": "96881d96d888674e7770b28296e29f95dda360f3",
"detected_licenses": [
"MIT"
],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 175,
"license_type": "permissive",
"max_line_length": 40,
"num_lines": 7,
"path": "/pyredux/store.pyi",
"repo_name": "rikbruil/pyredux",
"src_encoding": "UTF-8",
"text": "from typing import Any, Callable, Dict\nfrom pyredux.internal.store import Store\n\ndef create(\n reducer: Callable[Any, Dict],\n initial_state: Any\n) -> Store: pass\n"
},
{
"alpha_fraction": 0.5770547986030579,
"alphanum_fraction": 0.5787671208381653,
"avg_line_length": 22.360000610351562,
"blob_id": "cf27014dd547bfdff14a59cb46aea461384907c8",
"content_id": "06fd5b51dabba5fcc1281f4db46e92caed1ec5ee",
"detected_licenses": [
"MIT"
],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 584,
"license_type": "permissive",
"max_line_length": 72,
"num_lines": 25,
"path": "/pyredux/middleware.py",
"repo_name": "rikbruil/pyredux",
"src_encoding": "UTF-8",
"text": "# -*- coding: utf-8 -*-\n\nfrom pyredux import compose\n\n\ndef apply(*middlewares):\n\n def iterate(other):\n\n def wrap(reducer, initial_state):\n store = other(reducer, initial_state)\n dispatch = store.dispatch\n api = {\"dispatch\": lambda action: dispatch(action),\n \"get_state\": store.get_state}\n\n chain = map(lambda middleware: middleware(api), middlewares)\n dispatch = compose(*chain)(store.dispatch)\n\n store.dispatch = dispatch\n\n return store\n\n return wrap\n\n return iterate\n"
},
{
"alpha_fraction": 0.6312189102172852,
"alphanum_fraction": 0.6355721354484558,
"avg_line_length": 26.25423812866211,
"blob_id": "c5a08e4087f509b19041cd8c6e5e61a7d07ca18b",
"content_id": "b179187ec10de6008bfbea71153353950ec835e8",
"detected_licenses": [
"MIT"
],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 1608,
"license_type": "permissive",
"max_line_length": 63,
"num_lines": 59,
"path": "/test/store_test.py",
"repo_name": "rikbruil/pyredux",
"src_encoding": "UTF-8",
"text": "import unittest\nfrom pyredux.store import create\n\n\nclass StoreTest(unittest.TestCase):\n\n @staticmethod\n def reducer(state, action):\n return state + 1\n\n def testShouldHandleCreate(self):\n state = 1\n store = create(self.reducer, state)\n\n self.assertEqual(store.get_state(), state + 1)\n\n def testShouldRaiseTypeExceptionOnInvalidActionType(self):\n action = {}\n state = 1\n store = create(self.reducer, state)\n\n with self.assertRaises(TypeError):\n store.dispatch('foo')\n\n with self.assertRaises(TypeError):\n store.dispatch(action)\n\n def testShouldRaiseTypeExceptionOnInvalidReducerType(self):\n state = 1\n with self.assertRaises(TypeError):\n create(\"foo\", state)\n\n def testShouldRaiseExceptionWhenAlreadyDispatching(self):\n _state = 1\n _store = {\"store\": create(self.reducer, _state)}\n store = _store[\"store\"]\n\n def reducer(state, action):\n store.dispatch(action)\n return state\n\n with self.assertRaises(RuntimeError):\n store.replace_reducer(reducer)\n\n def listener(self, dispatch, get_state):\n self.assertTrue(callable(dispatch))\n self.assertTrue(callable(get_state))\n\n def testShouldFireListenerOnDispatch(self):\n state = 0\n\n store = create(self.reducer, state)\n unsubscribe = store.subscribe(self.listener)\n self.assertTrue(callable(unsubscribe))\n\n store.dispatch({\"type\": \"foo\"})\n\n self.assertTrue(unsubscribe())\n self.assertFalse(unsubscribe())\n"
},
{
"alpha_fraction": 0.6499999761581421,
"alphanum_fraction": 0.6499999761581421,
"avg_line_length": 10,
"blob_id": "d18ccf00c00db85380a38459d2f3800ca8db08ca",
"content_id": "758ecefb92fff66d4ce542221a15f10ae9b372ec",
"detected_licenses": [
"MIT"
],
"is_generated": false,
"is_vendor": false,
"language": "INI",
"length_bytes": 120,
"license_type": "permissive",
"max_line_length": 27,
"num_lines": 11,
"path": "/.coveragerc",
"repo_name": "rikbruil/pyredux",
"src_encoding": "UTF-8",
"text": "[run]\nbranch = True\ninclude =\n pyredux/*\n test/*\n\n[html]\ndirectory = build/html\n\n[xml]\noutput = build/coverage.xml"
},
{
"alpha_fraction": 0.3461538553237915,
"alphanum_fraction": 0.6153846383094788,
"avg_line_length": 12,
"blob_id": "de86049d78bf3d34801e586a4ab1e7012f669dd3",
"content_id": "ae7aa12999e5397b1f1b4d5792ad46defe197578",
"detected_licenses": [
"MIT"
],
"is_generated": false,
"is_vendor": false,
"language": "Text",
"length_bytes": 52,
"license_type": "permissive",
"max_line_length": 15,
"num_lines": 4,
"path": "/requirements.txt",
"repo_name": "rikbruil/pyredux",
"src_encoding": "UTF-8",
"text": "coverage==4.0.3\npep8==1.7.0\nsix==1.10.0\nyapf==0.6.2\n"
},
{
"alpha_fraction": 0.6191222667694092,
"alphanum_fraction": 0.6285266280174255,
"avg_line_length": 21,
"blob_id": "b572463735cd80eb1a366a7fefd59f47180beca3",
"content_id": "b091298e7a9cf3fd69f46dc09ba399efa332f1c7",
"detected_licenses": [
"MIT"
],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 638,
"license_type": "permissive",
"max_line_length": 59,
"num_lines": 29,
"path": "/test/compose_test.py",
"repo_name": "rikbruil/pyredux",
"src_encoding": "UTF-8",
"text": "import unittest\nfrom pyredux import compose\n\n\nclass ComposeTest(unittest.TestCase):\n\n def testShouldReturnArgumentWhenNothingToCompose(self):\n expected = 1\n\n composed = compose()\n actual = composed(expected)\n\n self.assertEqual(actual, expected)\n\n def testShouldComposeFunctions(self):\n value = 1\n\n def func1(param):\n self.assertEqual(param, value)\n return param\n\n def func2(param):\n self.assertEqual(param, value)\n return param\n\n composed = compose(func1, func2)\n self.assertTrue(callable(composed))\n\n composed(value)\n"
},
{
"alpha_fraction": 0.6825938820838928,
"alphanum_fraction": 0.6894198060035706,
"avg_line_length": 14.421052932739258,
"blob_id": "80d3a6ba0bdd170e4a8ea2f353a546535c975bc2",
"content_id": "087997a4741bcfc868aad9a6c521278c2f1cf204",
"detected_licenses": [
"MIT"
],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 293,
"license_type": "permissive",
"max_line_length": 34,
"num_lines": 19,
"path": "/example/main.py",
"repo_name": "rikbruil/pyredux",
"src_encoding": "UTF-8",
"text": "from pyredux.store import create\n\nstate = 0\n\n\ndef reducer(state, action):\n print(\"trigger reducer\")\n return state + 1\n\n\ndef listener(dispatch, get_state):\n print(\"Listener fired!\")\n return\n\n\nstore = create(reducer, state)\nstore.subscribe(listener)\n\nstore.dispatch({\"type\": \"foo\"})\n"
},
{
"alpha_fraction": 0.7755905389785767,
"alphanum_fraction": 0.7755905389785767,
"avg_line_length": 71.57142639160156,
"blob_id": "3c81d5ecec0deddc529ae95c9f8423055d5ce3a7",
"content_id": "3fe875ca04f19bf98181042d1db1db36365e3d19",
"detected_licenses": [
"MIT"
],
"is_generated": false,
"is_vendor": false,
"language": "Markdown",
"length_bytes": 508,
"license_type": "permissive",
"max_line_length": 172,
"num_lines": 7,
"path": "/README.md",
"repo_name": "rikbruil/pyredux",
"src_encoding": "UTF-8",
"text": "# PyRedux\n\n[](https://travis-ci.org/rikbruil/pyredux)\n[](https://coveralls.io/github/rikbruil/pyredux?branch=master)\n[](https://scrutinizer-ci.com/g/rikbruil/pyredux/?branch=master)\n\nA port of Redux for Python (for fun and learning)\n"
},
{
"alpha_fraction": 0.7069486379623413,
"alphanum_fraction": 0.7069486379623413,
"avg_line_length": 35.88888931274414,
"blob_id": "4fc1f36f7b3737f7f5bd23d2878b2402c1affc40",
"content_id": "a14f10ac01e515d26c02d63ecd992eb0fd0e6114",
"detected_licenses": [
"MIT"
],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 331,
"license_type": "permissive",
"max_line_length": 87,
"num_lines": 9,
"path": "/pyredux/internal/store.pyi",
"repo_name": "rikbruil/pyredux",
"src_encoding": "UTF-8",
"text": "from typing import Any, Callable, Dict\n\nclass BaseStore:\n def dispatch(self, action: Dict) -> Dict: pass\n def get_state(self) -> Any: pass\n\nclass Store(BaseStore):\n def subscribe(self, listener: Callable[Callable[Dict], Callable]) -> Callable: pass\n def replace_reducer(self, reducer: Callable[Any, Dict]) -> None: pass"
}
] | 14 |
AodhanDalton/python-sorting-algorthms
|
https://github.com/AodhanDalton/python-sorting-algorthms
|
5f88c7f9dc22794be499cf9073603d15cd21e40e
|
09a8fb8aab9e242df6548fbefd716e029ed81738
|
8ccbc9d0095be853e22be740d09b7e7541021dbe
|
refs/heads/master
| 2021-08-07T17:34:18.501950 | 2020-12-08T22:04:56 | 2020-12-08T22:04:56 | 228,642,666 | 0 | 0 | null | null | null | null | null |
[
{
"alpha_fraction": 0.8461538553237915,
"alphanum_fraction": 0.8461538553237915,
"avg_line_length": 25,
"blob_id": "acd3b955e9c9ceec1392b6ee432db9e57d982316",
"content_id": "e03f90f09535c9e1fdf5b6aa71563bab2c53e707",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Markdown",
"length_bytes": 52,
"license_type": "no_license",
"max_line_length": 26,
"num_lines": 2,
"path": "/README.md",
"repo_name": "AodhanDalton/python-sorting-algorthms",
"src_encoding": "UTF-8",
"text": "# python-sorting-algorthms\nPython sorting algorthms\n"
},
{
"alpha_fraction": 0.5042808055877686,
"alphanum_fraction": 0.5316780805587769,
"avg_line_length": 24.39130401611328,
"blob_id": "b1aecd7152f62ed900527b40435027c0fbaaabdc",
"content_id": "1668863315853793f58b90ef71959b5feea8f9aa",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 1168,
"license_type": "no_license",
"max_line_length": 84,
"num_lines": 46,
"path": "/sorts/selection_sort.py",
"repo_name": "AodhanDalton/python-sorting-algorthms",
"src_encoding": "UTF-8",
"text": "import sys\nimport random\nimport time\n\nclass color:\n GREEN = '\\033[92m'\n RED = '\\033[91m'\n END = '\\033[0m'\n\nusr = input(\"Size of array: \")\n\n# creating an array of the size the user has entered with random numbers from 1,1000\narr = [random.randint(1,1000) for _ in range(usr)]\n\n# for loop with the selection sort inside it\nprint(color.GREEN + \"Size %d\" %usr)\n\nfor i in range (len(arr)):\n\n min = i\n for j in range (i+1, len(arr)):\n if arr[min] > arr[j]:\n min = j\n\n # swapping the values \n arr[i], arr[min] = arr[min], arr[i]\n tmp = arr[i]\n # Print statment for the end of each loop\n print (\"\\nRound %d\" %(i+1))\n for i in range (len(arr)):\n if(arr[i] == arr[min]):\n print(color.RED + '%d' %arr[i] + color.END),\n time.sleep(.05)\n elif(arr[i] == tmp):\n print(color.GREEN + '%d' %arr[i] + color.END),\n time.sleep(.05)\n else:\n print (\"%d\" %arr[i]),\n time.sleep(.05)\n\n# Printing the value of the sorted array \nprint (\"\\n===============\")\nprint (\"Sorted array\") \nfor i in range(len(arr)): \n print(\"%d\" %arr[i]),\nprint (\"\\n===============\")\n"
},
{
"alpha_fraction": 0.5027933120727539,
"alphanum_fraction": 0.5251396894454956,
"avg_line_length": 21.40625,
"blob_id": "4aaa13fe3621d84c9fd37b067cc721b92f368675",
"content_id": "c0c581a3abf8840fd804877f873e6c88209739e1",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 716,
"license_type": "no_license",
"max_line_length": 51,
"num_lines": 32,
"path": "/sorts/quick_sort.py",
"repo_name": "AodhanDalton/python-sorting-algorthms",
"src_encoding": "UTF-8",
"text": "import random\nk = 0\ndef partition(lst, low, high):\n global k\n i = (low -1)\n pivot = lst[high]\n for j in range(low, high):\n if lst[j] <= pivot:\n i += 1\n lst[i], lst[j] = lst[j], lst[i]\n lst[i+1],lst[high] = lst[high], lst[i+1]\n k+=1\n print(\"Round %d\" %k)\n print(lst)\n return (i+1)\n \ndef quick_sort(lst, low, high):\n if low < high:\n pi = partition(lst, low, high)\n quick_sort(lst, low, pi-1)\n quick_sort(lst, pi+1, high)\n \narr = []\nsize = int(input(\"Enter size of the list: \"))\n\narr = [random.randint(1,1000) for _ in range(size)]\n \nlow = 0\nhigh = len(arr) - 1\nquick_sort(arr, low, high)\nprint(\"---Final Result----\")\nprint(arr)"
}
] | 3 |
debmalya92/credit-card-defaulter-prediction
|
https://github.com/debmalya92/credit-card-defaulter-prediction
|
b315b7fabd09287c5deec802036461f87f0b9b33
|
c6e4e4df227c1dd47e79a50ad6631d986a656880
|
1b42aaf30fee6ab382b028a21975b414cb352539
|
refs/heads/main
| 2023-01-04T21:54:18.237641 | 2020-10-29T13:47:31 | 2020-10-29T13:47:31 | 307,806,037 | 1 | 8 | null | null | null | null | null |
[
{
"alpha_fraction": 0.6529331207275391,
"alphanum_fraction": 0.6770329475402832,
"avg_line_length": 22.72945213317871,
"blob_id": "9669eb2068d633f1581283427f75dc7c8b45416b",
"content_id": "36f92312d8b5773dc55d61a944cd55142807d48a",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 13861,
"license_type": "no_license",
"max_line_length": 440,
"num_lines": 584,
"path": "/credit_default_prediction.py",
"repo_name": "debmalya92/credit-card-defaulter-prediction",
"src_encoding": "UTF-8",
"text": "#!/usr/bin/env python\n# coding: utf-8\n\n# ## Credit Default Prediction\n\n# ###### Overview\n# We will build a random forest model to predict whether a given customer defaults or not. Credit default is one of the most important problems in the banking and risk analytics industry. There are various attributes which can be used to predict default, such as demographic data (age, income, employment status, etc.), (credit) behavioural data (past loans, payment, number of times a credit payment has been delayed by the customer etc.).\n# \n\n# ##### Dataset Information\n# This dataset contains information on default payments, demographic factors, credit data, history of payment, and bill statements of credit card clients in Taiwan from April 2005 to September 2005.\n\n# ##### There are 25 variables:\n# \n# - **ID**: ID of each client\n# - **LIMIT_BAL**: Amount of given credit in NT dollars (includes individual and family/supplementary credit\n# - **SEX**: Gender (1=male, 2=female)\n# - **EDUCATION**: (1=graduate school, 2=university, 3=high school, 4=others, 5=unknown, 6=unknown)\n# - **MARRIAGE**: Marital status (1=married, 2=single, 3=others)\n# - **AGE**: Age in years\n# - **PAY_0**: Repayment status in September, 2005 (-1=pay duly, 1=payment delay for one month, 2=payment delay for two months, … 8=payment delay for eight months, 9=payment delay for nine months and above)\n# - **PAY_2**: Repayment status in August, 2005 (scale same as above)\n# - **PAY_3**: Repayment status in July, 2005 (scale same as above)\n# - **PAY_4**: Repayment status in June, 2005 (scale same as above)\n# - **PAY_5**: Repayment status in May, 2005 (scale same as above)\n# - **PAY_6**: Repayment status in April, 2005 (scale same as above)\n# - **BILL_AMT1**: Amount of bill statement in September, 2005 (NT dollar)\n# - **BILL_AMT2**: Amount of bill statement in August, 2005 (NT dollar)\n# - **BILL_AMT3**: Amount of bill statement in July, 2005 (NT dollar)\n# - **BILL_AMT4**: Amount of bill statement in June, 2005 (NT dollar)\n# - **BILL_AMT5**: Amount of bill statement in May, 2005 (NT dollar)\n# - **BILL_AMT6**: Amount of bill statement in April, 2005 (NT dollar)\n# - **PAY_AMT1**: Amount of previous payment in September, 2005 (NT dollar)\n# - **PAY_AMT2**: Amount of previous payment in August, 2005 (NT dollar)\n# - **PAY_AMT3**: Amount of previous payment in July, 2005 (NT dollar)\n# - **PAY_AMT4**: Amount of previous payment in June, 2005 (NT dollar)\n# - **PAY_AMT5**: Amount of previous payment in May, 2005 (NT dollar)\n# - **PAY_AMT6**: Amount of previous payment in April, 2005 (NT dollar)\n# - **default.payment.next.month**: Default payment (1=yes, 0=no)\n\n# <hr>\n\n# ### Data Understanding and Cleaning\n\n# In[63]:\n\n\n# Importing the required libraries\nimport pandas as pd\nimport numpy as np\nimport matplotlib.pyplot as plt\nimport seaborn as sns\nget_ipython().run_line_magic('matplotlib', 'inline')\n\nfrom sklearn.model_selection import train_test_split\nfrom sklearn.model_selection import KFold\nfrom sklearn.model_selection import GridSearchCV\nfrom sklearn.metrics import classification_report,confusion_matrix, accuracy_score\nfrom sklearn.ensemble import RandomForestClassifier\nimport pickle\n\n# To ignore warnings\nimport warnings\nwarnings.filterwarnings(\"ignore\")\n\n\n# In[64]:\n\n\n# Reading the csv file and putting it into 'df' object.\ndf = pd.read_csv('credit-card-default.csv')\ndf.head()\n\n\n# In[65]:\n\n\n# Let's understand the type of columns\ndf.info()\n\n\n# In[66]:\n\n\ndf.SEX.value_counts()\n\n\n# In[67]:\n\n\ndf.EDUCATION.value_counts()\n\n\n# As per data dictionary there are 5 categories 1 to 5 for Education column and 5 and 6 are both showing as 'UNKNOWN'. There is no **0** category in the dictionary but present in dataset.\n# - Hence Combining `0, 5, and 6` together as **'UNKNOWN'** category. Changing all `6 and 0` to `5`.\n\n# In[68]:\n\n\ndf['EDUCATION'].replace([0, 6], 5, inplace=True)\n\n\n# In[69]:\n\n\ndf.EDUCATION.value_counts()\n\n\n# In[70]:\n\n\ndf.MARRIAGE.value_counts()\n\n\n# As per data dictionary there are 3 categories 1 to 3 for Marriage column but **0** category present in dataset.\n# - Hence Combining `0` as **'Others'** category. Changing all `0` to `3`.\n\n# In[71]:\n\n\ndf['MARRIAGE'].replace(0, 3, inplace=True)\n\n\n# In[72]:\n\n\ndf.MARRIAGE.value_counts()\n\n\n# In[73]:\n\n\ndf.PAY_2.value_counts()\n\n\n# In[74]:\n\n\ndf.PAY_0.value_counts()\n\n\n# In this case, we know that there are no major data quality issues, so we'll go ahead and build the model.\n\n# <hr>\n\n# ### Data Preparation and Model Building\n\n# In[75]:\n\n\n# Dropping id column as it's no use\ndf.drop('ID',axis=1, inplace=True)\n\n\n# In[76]:\n\n\n# Putting feature variable to X\nX = df.drop('defaulted',axis=1)\n\n# Putting response variable to y\ny = df['defaulted']\n\n# Splitting the data into train and test\nX_train, X_test, y_train, y_test = train_test_split(X, y, test_size=0.30, random_state=101)\n\n\n# \n# \n# #### Default Hyperparameters\n# Let's first fit a random forest model with default hyperparameters.\n\n# In[77]:\n\n\n# Running the random forest with default parameters.\nrfc = RandomForestClassifier()\n\n# fit\nrfc.fit(X_train,y_train)\n\n\n# In[78]:\n\n\n# Making predictions\npredictions = rfc.predict(X_test)\n\n\n# In[79]:\n\n\n# Let's check the report of our default model\nprint(classification_report(y_test,predictions))\n\n# Printing confusion matrix\nprint(confusion_matrix(y_test,predictions))\n\nprint(accuracy_score(y_test,predictions))\n\n\n# So far so good, let's now look at the list of hyperparameters which we can tune to improve model performance.\n\n# <hr>\n\n# ### Hyperparameter Tuning\n\n# The following hyperparameters are present in a random forest classifier. We will tune each parameters\n# \n# \n# - **n_estimators** \n# - **criterion**\n# - **max_features** \n# - **max_depth**\n# - **min_samples_split**\n# - **min_samples_leaf**\n# - **min_weight_fraction_leaf**\n# - **max_leaf_nodes**\n# - **min_impurity_split**\n\n# <hr>\n\n# ### Tuning max_depth\n\n# Let's try to find the optimum values for ```max_depth``` and understand how the value of max_depth impacts the overall accuracy of the ensemble.\n# \n\n# In[80]:\n\n\n# specify number of folds for k-fold CV\nn_folds = 5\n\n# parameters to build the model on\nparameters = {'max_depth': range(2, 20, 5)}\n\n# instantiate the model\nrf = RandomForestClassifier()\n\n\n# fit tree on training data\nrf = GridSearchCV(rf, parameters, \n cv=n_folds, \n scoring=\"accuracy\",\n return_train_score=True)\nrf.fit(X_train, y_train)\n\n\n# In[81]:\n\n\n# scores of GridSearch CV\nscores = rf.cv_results_\npd.DataFrame(scores).head()\n\n\n# In[82]:\n\n\n# plotting accuracies with max_depth\nplt.figure()\nplt.plot(scores[\"param_max_depth\"], \n scores[\"mean_train_score\"], \n label=\"training accuracy\")\nplt.plot(scores[\"param_max_depth\"], \n scores[\"mean_test_score\"], \n label=\"test accuracy\")\nplt.xlabel(\"max_depth\")\nplt.ylabel(\"Accuracy\")\nplt.legend()\nplt.show()\n\n\n# You can see that as we increase the value of max_depth, both train and test scores increase till a point, but after that test score starts to decrease. The ensemble tries to overfit as we increase the max_depth.\n# \n# Thus, controlling the depth of the constituent trees will help reduce overfitting in the forest.\n\n# <hr>\n\n# ### Tuning n_estimators\n\n# Let's try to find the optimum values for n_estimators and understand how the value of n_estimators impacts the overall accuracy. Notice that we'll specify an appropriately low value of max_depth, so that the trees do not overfit.\n# <br>\n# \n\n# In[83]:\n\n\n# specify number of folds for k-fold CV\nn_folds = 5\n\n# parameters to build the model on\nparameters = {'n_estimators': range(100, 1500, 400)}\n\n# instantiate the model (note we are specifying a max_depth)\nrf = RandomForestClassifier(max_depth=4)\n\n\n# fit tree on training data\nrf = GridSearchCV(rf, parameters, \n cv=n_folds, \n scoring=\"accuracy\",\n return_train_score=True)\nrf.fit(X_train, y_train)\n\n\n# In[84]:\n\n\n# scores of GridSearch CV\nscores = rf.cv_results_\npd.DataFrame(scores).head()\n\n\n# In[85]:\n\n\n# plotting accuracies with n_estimators\nplt.figure()\nplt.plot(scores[\"param_n_estimators\"], \n scores[\"mean_train_score\"], \n label=\"training accuracy\")\nplt.plot(scores[\"param_n_estimators\"], \n scores[\"mean_test_score\"], \n label=\"test accuracy\")\nplt.xlabel(\"n_estimators\")\nplt.ylabel(\"Accuracy\")\nplt.legend()\nplt.show()\n\n\n# <hr>\n\n# ### Tuning max_features\n# \n# Let's see how the model performance varies with ```max_features```, which is the maximum numbre of features considered for splitting at a node.\n\n# In[86]:\n\n\n# specify number of folds for k-fold CV\nn_folds = 5\n\n# parameters to build the model on\nparameters = {'max_features': [4, 8, 14, 20, 24]}\n\n# instantiate the model\nrf = RandomForestClassifier(max_depth=4)\n\n\n# fit tree on training data\nrf = GridSearchCV(rf, parameters, \n cv=n_folds, \n scoring=\"accuracy\",\n return_train_score=True)\nrf.fit(X_train, y_train)\n\n\n# In[87]:\n\n\n# scores of GridSearch CV\nscores = rf.cv_results_\npd.DataFrame(scores).head()\n\n\n# In[88]:\n\n\n# plotting accuracies with max_features\nplt.figure()\nplt.plot(scores[\"param_max_features\"], \n scores[\"mean_train_score\"], \n label=\"training accuracy\")\nplt.plot(scores[\"param_max_features\"], \n scores[\"mean_test_score\"], \n label=\"test accuracy\")\nplt.xlabel(\"max_features\")\nplt.ylabel(\"Accuracy\")\nplt.legend()\nplt.show()\n\n\n# Apparently, the training and test scores *both* seem to increase as we increase max_features, and the model doesn't seem to overfit more with increasing max_features. Think about why that might be the case.\n\n# ### Tuning min_samples_leaf\n\n# The hyperparameter **min_samples_leaf** is the minimum number of samples required to be at a leaf node:\n# - If int, then consider min_samples_leaf as the minimum number.\n# - If float, then min_samples_leaf is a percentage and ceil(min_samples_leaf * n_samples) are the minimum number of samples for each node.\n\n# Let's now check the optimum value for min samples leaf in our case.\n\n# In[89]:\n\n\n# specify number of folds for k-fold CV\nn_folds = 5\n\n# parameters to build the model on\nparameters = {'min_samples_leaf': range(100, 400, 50)}\n\n# instantiate the model\nrf = RandomForestClassifier()\n\n\n# fit tree on training data\nrf = GridSearchCV(rf, parameters, \n cv=n_folds, \n scoring=\"accuracy\",\n return_train_score=True)\nrf.fit(X_train, y_train)\n\n\n# In[90]:\n\n\n# scores of GridSearch CV\nscores = rf.cv_results_\npd.DataFrame(scores).head()\n\n\n# In[91]:\n\n\n# plotting accuracies with min_samples_leaf\nplt.figure()\nplt.plot(scores[\"param_min_samples_leaf\"], \n scores[\"mean_train_score\"], \n label=\"training accuracy\")\nplt.plot(scores[\"param_min_samples_leaf\"], \n scores[\"mean_test_score\"], \n label=\"test accuracy\")\nplt.xlabel(\"min_samples_leaf\")\nplt.ylabel(\"Accuracy\")\nplt.legend()\nplt.show()\n\n\n# You can see that the model starts of overfit as you decrease the value of min_samples_leaf. \n\n# ### Tuning min_samples_split\n# \n# Let's now look at the performance of the ensemble as we vary min_samples_split.\n\n# In[92]:\n\n\n# GridSearchCV to find optimal min_samples_split\n# specify number of folds for k-fold CV\nn_folds = 5\n\n# parameters to build the model on\nparameters = {'min_samples_split': range(200, 500, 50)}\n\n# instantiate the model\nrf = RandomForestClassifier()\n\n\n# fit tree on training data\nrf = GridSearchCV(rf, parameters, \n cv=n_folds, \n scoring=\"accuracy\",\n return_train_score=True)\nrf.fit(X_train, y_train)\n\n\n# In[93]:\n\n\n# scores of GridSearch CV\nscores = rf.cv_results_\npd.DataFrame(scores).head()\n\n\n# In[94]:\n\n\n# plotting accuracies with min_samples_split\nplt.figure()\nplt.plot(scores[\"param_min_samples_split\"], \n scores[\"mean_train_score\"], \n label=\"training accuracy\")\nplt.plot(scores[\"param_min_samples_split\"], \n scores[\"mean_test_score\"], \n label=\"test accuracy\")\nplt.xlabel(\"min_samples_split\")\nplt.ylabel(\"Accuracy\")\nplt.legend()\nplt.show()\n\n\n# <hr>\n\n# ### Grid Search to Find Optimal Hyperparameters\n\n# We can now find the optimal hyperparameters using GridSearchCV.\n\n# In[95]:\n\n\n# Create the parameter grid based on the results of random search \nparam_grid = {\n 'max_depth': [4,8,10],\n 'min_samples_leaf': range(100, 400, 200),\n 'min_samples_split': range(200, 500, 200),\n 'n_estimators': [100,200, 300], \n 'max_features': [5, 10]\n}\n\n# Create a based model\nrf = RandomForestClassifier()\n\n# Instantiate the grid search model\ngrid_search = GridSearchCV(estimator = rf, param_grid = param_grid, \n cv = 3, n_jobs = -1,verbose = 1)\n\n\n# In[96]:\n\n\n# Fit the grid search to the data\ngrid_search.fit(X_train, y_train)\n\n\n# In[97]:\n\n\n# printing the optimal accuracy score and hyperparameters\nprint('We can get accuracy of',grid_search.best_score_,'using',grid_search.best_params_)\n\n\n# In[98]:\n\n\ntype(grid_search.best_params_)\n\n\n# **Fitting the final model with the best parameters obtained from grid search.**\n\n# In[104]:\n\n\n# model with the best hyperparameters\n\nrfc = RandomForestClassifier(bootstrap=True,\n max_depth=4,\n min_samples_leaf=100, \n min_samples_split=200,\n max_features=10,\n n_estimators=300)\n\n\n# In[105]:\n\n\n# fit\nrfc.fit(X_train,y_train)\n\n\n# In[106]:\n\n\n# predict\npredictions = rfc.predict(X_test)\n\n\n# In[107]:\n\n\n# Let's check the report of our default model\nprint(classification_report(y_test,predictions))\n\n# Printing confusion matrix\nprint(confusion_matrix(y_test,predictions))\n\nprint(accuracy_score(y_test,predictions))\n\n\n# In[108]:\n\n\n# Saving the model to disk\npickle.dump(rfc, open('model.pkl', 'wb'))\n\n"
}
] | 1 |
M-Lambda/cs-module-project-hash-tables
|
https://github.com/M-Lambda/cs-module-project-hash-tables
|
2f5bc5487cbb365a5fccba534eb3aa9e71f1ded6
|
9c80393878ee6f1963419770cc9540120488fe55
|
7461a3bbd526d78ae09ce5d616956ee3efc85a94
|
refs/heads/master
| 2023-01-10T02:31:55.892969 | 2020-11-13T20:43:11 | 2020-11-13T20:43:11 | null | 0 | 0 | null | null | null | null | null |
[
{
"alpha_fraction": 0.45625001192092896,
"alphanum_fraction": 0.45875000953674316,
"avg_line_length": 26.586206436157227,
"blob_id": "1426b06a4f98a716bb0bb7479be5e5f82f6caaa7",
"content_id": "eac9f84deec25cfcd19b013d31f9e045b5a23b2e",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 800,
"license_type": "no_license",
"max_line_length": 83,
"num_lines": 29,
"path": "/applications/word_count/word_count.py",
"repo_name": "M-Lambda/cs-module-project-hash-tables",
"src_encoding": "UTF-8",
"text": "def word_count(s):\n # Your code here\n dic = {}\n excluded = '\":;,.-+=/\\\\|[]{}()*^&'\n if s == '':\n return dic\n s = s.lower()\n s = s.replace('\\r', ' ')\n s = s.replace('\\n', ' ')\n s = s.replace('\\t', ' ')\n words = s.split(' ')\n for word in words:\n for char in excluded:\n if char in word:\n word = word.replace(char, '')\n if word != '':\n if word in dic:\n dic[word] += 1\n else:\n dic[word] = 1\n return dic\n\n\nif __name__ == \"__main__\":\n print(word_count(\"\"))\n print(word_count(\"Hello\"))\n print(word_count('Hello, my cat. And my cat doesn\\'t say \"hello\" back.'))\n print(word_count(\n 'This is a test of the emergency broadcast network. This is only a test.'))\n"
},
{
"alpha_fraction": 0.42152467370033264,
"alphanum_fraction": 0.42825111746788025,
"avg_line_length": 23.77777862548828,
"blob_id": "ad0f5fecdbd9a460ee9a5259f196f33947e4d416",
"content_id": "dafe9f4e2468360c6272dc46043de0ca242d2519",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 892,
"license_type": "no_license",
"max_line_length": 61,
"num_lines": 36,
"path": "/applications/histo/histo.py",
"repo_name": "M-Lambda/cs-module-project-hash-tables",
"src_encoding": "UTF-8",
"text": "# Your code here\n\n\ndef hist(file):\n with open(file) as f:\n s = f.read()\n # Your code here\n dic = {}\n excluded = '\":;,.-+=/\\\\|[]{}()*^&'\n if s == '':\n return dic\n s = s.lower()\n s = s.replace('\\r', ' ')\n s = s.replace('\\n', ' ')\n s = s.replace('\\t', ' ')\n words = s.split(' ')\n for word in words:\n for char in excluded:\n if char in word:\n word = word.replace(char, '')\n if word != '':\n if word in dic:\n dic[word] += 1\n else:\n dic[word] = 1\n dic = {k: v for k, v in sorted(\n dic.items(), key=lambda item: item[0])}\n dic = {k: v for k, v in sorted(\n dic.items(), key=lambda item: item[1], reverse=True)}\n for key in dic:\n l = len(key)\n space = (20 - l)*' '\n print(key, space, dic[key]*'#')\n\n\nhist(\"./robin.txt\")\n"
},
{
"alpha_fraction": 0.315315306186676,
"alphanum_fraction": 0.36036035418510437,
"avg_line_length": 22.545454025268555,
"blob_id": "a76f4777bb93f6ce2fa0f0a7b2bb8d4df8b733ea",
"content_id": "bd7924740ac129a7ae341ccee4c1b8584121d988",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 777,
"license_type": "no_license",
"max_line_length": 125,
"num_lines": 33,
"path": "/applications/sumdiff/sumdiff.py",
"repo_name": "M-Lambda/cs-module-project-hash-tables",
"src_encoding": "UTF-8",
"text": "\"\"\"\nfind all a, b, c, d in q such that\nf(a) + f(b) = f(c) - f(d)\n\"\"\"\n\nfrom itertools import permutations\n# q = set(range(1, 10))\n# q = set(range(1, 200))\nq = (1, 3, 4, 7, 12)\n\n\ndef f(x):\n return x * 4 + 6\n\n\n# Your code here\nz = q * 2\nperm = list(permutations(z, 2))\nl = []\nfor each in perm:\n add = 0\n sub = 0\n for i in perm:\n add = f(i[0]) + f(i[1])\n for j in perm:\n sub = f(j[0]) - f(j[1])\n if add == sub:\n k = str(i[0]) + '_' + str(i[1]) + '_' + \\\n str(j[0]) + '_' + str(j[1])\n if k not in l:\n l.append(k)\n print(\n f\"f({i[0]}) + f({i[1]}) = f({j[0]}) - f({j[1]}) {f(i[0])} + {f(i[1])} = {f(j[0])} - {f(j[1])}\")\n"
},
{
"alpha_fraction": 0.5373271703720093,
"alphanum_fraction": 0.547465443611145,
"avg_line_length": 24.83333396911621,
"blob_id": "61ad2c20ea28726f0ad732562444c42904188f92",
"content_id": "10969b74a3cf59d3f1bf41f4cb2f8ff069a04774",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 1085,
"license_type": "no_license",
"max_line_length": 71,
"num_lines": 42,
"path": "/applications/markov/markov.py",
"repo_name": "M-Lambda/cs-module-project-hash-tables",
"src_encoding": "UTF-8",
"text": "import random\n\n# Read in all the words in one go\nwith open(\"input.txt\") as f:\n words = f.read()\n\n# TODO: analyze which words can follow other words\n# Your code here\n\n\ndef markov(words):\n dic = {}\n words = words.split(' ')\n for i in range(0, len(words)-1):\n if words[i] in dic:\n dic[words[i]].append(words[i+1])\n else:\n dic[words[i]] = [words[i + 1]]\n start_not_found = True\n while start_not_found:\n start = random.choice(words)\n if start.isupper() or start[0] == '\"':\n start_not_found = False\n\n next_word = start\n sentence = start\n stop_list = ['.', '?', '!', '.\"', '?\"', '!\"']\n stop_not_found = True\n while stop_not_found:\n next_word = random.choice(dic[next_word])\n if next_word[-1] in stop_list or next_word[-2:-1] in stop_list:\n sentence += ' ' + next_word\n stop_not_found = False\n else:\n sentence += ' ' + next_word\n print(sentence)\n# TODO: construct 5 random sentences\n# Your code here\n\n\nfor i in range(0, 5):\n markov(words)\n"
},
{
"alpha_fraction": 0.48944100737571716,
"alphanum_fraction": 0.4968944191932678,
"avg_line_length": 24.967741012573242,
"blob_id": "0d3bb4ed63df7a5448eb32d5b8680b080f1ceb2b",
"content_id": "a11c42f449946164e4a3fd367af614011484af6c",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 805,
"license_type": "no_license",
"max_line_length": 72,
"num_lines": 31,
"path": "/applications/crack_caesar/crack_caesar.py",
"repo_name": "M-Lambda/cs-module-project-hash-tables",
"src_encoding": "UTF-8",
"text": "# Use frequency analysis to find the key to ciphertext.txt, and then\n# decode it.\nwith open(\"ciphertext.txt\") as f:\n encoded = f.read()\n# Your code here\nfreq = ['E', 'T', 'A', 'O', 'H', 'N', 'R', 'I', 'S', 'D', 'L', 'W', 'U',\n 'G', 'F', 'B', 'M', 'Y', 'C', 'P', 'K', 'V', 'Q', 'J', 'X', 'Z']\ndic = {}\nfor i in encoded:\n if i in freq:\n if i in dic:\n val = dic[i]+1\n dic[i] = val\n else:\n dic[i] = 1\n\ndic = {k: v for k, v in sorted(\n dic.items(), key=lambda item: item[1], reverse=True)}\n# print(dic)\nindex = 0\nfor key in dic:\n dic[key] = freq[index]\n index += 1\n# print(dic)\ndecoded = \"\"\nfor i in range(0, len(encoded)):\n if encoded[i] in freq:\n decoded += dic[encoded[i]]\n else:\n decoded += encoded[i]\nprint(decoded)\n"
}
] | 5 |
geo7/backuptest
|
https://github.com/geo7/backuptest
|
0d8c08a781521e67ed13dae0d4e89f388f7616ea
|
c091d2455339a1aa8403c0bff3306c7f3053e804
|
8576ead278d88a9ddcd3b03c90138ec61ff0cc19
|
refs/heads/master
| 2015-08-14T15:26:48.360396 | 2014-12-25T01:51:10 | 2014-12-25T01:51:10 | null | 0 | 0 | null | null | null | null | null |
[
{
"alpha_fraction": 0.7292817831039429,
"alphanum_fraction": 0.7292817831039429,
"avg_line_length": 24.714284896850586,
"blob_id": "bfca298d2dc5c7c53b56945916f1e54b3d7d8bed",
"content_id": "2c9bcf17646555dc9d3a2900e499f112ab849686",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Markdown",
"length_bytes": 181,
"license_type": "no_license",
"max_line_length": 71,
"num_lines": 7,
"path": "/README.md",
"repo_name": "geo7/backuptest",
"src_encoding": "UTF-8",
"text": "backuptest\n==========\n\ntesting some backup stuff\n\nWould be good to test this as a backup, ie actually restoring from it. \nPossibly create a tar ball out of it and then remount it? \n"
},
{
"alpha_fraction": 0.5870320200920105,
"alphanum_fraction": 0.5893477201461792,
"avg_line_length": 25.171716690063477,
"blob_id": "770fbaaa9703056abd13664605e5fda00deb7a7d",
"content_id": "3ca2afea4331c57e8ce754987af229f465eab188",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 2591,
"license_type": "no_license",
"max_line_length": 58,
"num_lines": 99,
"path": "/home_backup.py",
"repo_name": "geo7/backuptest",
"src_encoding": "UTF-8",
"text": "import os\nimport sys\nimport subprocess\nimport time\nimport logging\n\n\"\"\"Script works - would need to be adjusted to \nuse with a full backup though - at the moment it\njust backs up the home directory to my external HDD.\n\nOptions that haven't been applied are multiple backups,\nit might be nice to have two backups at any time incase \nI wanted to roll back further? \n\nCompression isn't used, but might not be needed(?) I'm \nnot sure if there would be any point in compressing this\nor not. \n\nScript needs to be refactored. \n\"\"\"\n\n# ---------------------------------------\n# FUNCTIONS\n# -------------------------------------- \n\ndef create_file(a_file_name): \n \"\"\"creates file of given name and \n returns it\"\"\"\n subprocess.call(['touch',a_file_name])\n return open(a_file_name, 'w+')\n\ndef write_exclusions(ex_list, a_file):\n \"\"\"This will write a list to a given \n file, ensuring that each list element\n is on a new line\"\"\"\n for entry in ex_list:\n a_file.write(entry+'\\n')\n\ndef check_drive_connected(path):\n if not os.path.exists(\"/media/vco/g500\"):\n print(\"Hard Drive not connected\")\n exit()\n\ndef create_package_file():\n args = ['dpkg', '--get-selections']\n with open('a_file.txt', 'w+') as outfile:\n subprocess.call(args, stdout=outfile)\n\n# -----------------------------------------\n# Variables\n\nstart_dir= '/home/vco/'\ndest_dir = '/media/vco/g500/' \nfile_name = 'rsyncexclude.txt'\n\n# ----------------------------------------\n# Main Script\n\n# if the drive isn't connected the script exits, otherwise\n# it just creates another dir in /media (not wanted)\ncheck_drive_connected(start_dir)\ncreate_package_file()\nexclude_file = create_file(file_name)\n\n# These are the directories to be\n# excluded\ndirectories = [\n '/musik/',\n '/notthis/', \n '/blah.py',\n 'R-Pi/',\n 'Downloads/',\n 'ImageFiles/',\n 'VirtualBox VMs/',\n '.config/google-chrome/',\n '.cache/google-chrome/',\n ]\n\nwrite_exclusions(directories, exclude_file)\n\n# Closes the text file, other wise\n# the subprocess call misses it.\nexclude_file.close()\n\n# This list makes up the actual rsync\n# command that will be executed. \nargstwo= [\"rsync\",\n \"-avhXA\",\n \"--exclude-from=\"+file_name,\n \"--delete\",\n start_dir,\n dest_dir,\n ]\n\n# executes the rsync command. \nsubprocess.call(argstwo)\n# Deletes the text file that contained the\n# exclude information. \nsubprocess.call(['rm', file_name])\n"
}
] | 2 |
juanclopezr/JuanLopez_Ej1
|
https://github.com/juanclopezr/JuanLopez_Ej1
|
09e999cbb0842e3e5349d4f53e318a961b2b57c7
|
13872d8821f53045ce472b6aa0fa46a24f11c7a3
|
16ea899ede76ecc3beba104b380750be08c80c4d
|
refs/heads/master
| 2021-06-12T08:48:08.690680 | 2017-02-13T02:24:45 | 2017-02-13T02:24:45 | 79,863,465 | 0 | 0 | null | null | null | null | null |
[
{
"alpha_fraction": 0.6363636255264282,
"alphanum_fraction": 0.6931818127632141,
"avg_line_length": 26.789474487304688,
"blob_id": "e985dea7cbf39395b99d1e1ef6600095965e9b5e",
"content_id": "c5db8cd8f7de66a2df2f160af97c77a7686b29c2",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 528,
"license_type": "no_license",
"max_line_length": 63,
"num_lines": 19,
"path": "/grafica.py",
"repo_name": "juanclopezr/JuanLopez_Ej1",
"src_encoding": "UTF-8",
"text": "import matplotlib.pyplot as plt\nimport numpy as np\n\npotential = np.loadtxt('potential.dat')\nfieldx = np.loadtxt('fieldx.dat')\nfieldy = np.loadtxt('fieldy.dat')\n\nx = np.linspace(-2.5,2.5,256)\ny = np.linspace(2.5,-2.5,256)\n\nplt.imshow(potential,extent=[-2.5,2.5,-2.5,2.5])\nplt.xlim([-2.5,2.5])\nplt.ylim([-2.5,2.5])\nplt.xlabel('$x$(cm)')\nplt.ylabel('$y$(cm)')\nplt.colorbar(label='Voltage(V)')\nplt.streamplot(x,y,fieldx,fieldy,color='black')\nplt.title('Electric potential and electric field in capacitor')\nplt.savefig('placas.pdf')\n"
},
{
"alpha_fraction": 0.7341772317886353,
"alphanum_fraction": 0.7383966445922852,
"avg_line_length": 20.545454025268555,
"blob_id": "9ecbfc5ffcd79a21ca72e3463c6b37d388d8da5a",
"content_id": "89483825ddfec66dd38db380c91213f457c4ced7",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Makefile",
"length_bytes": 237,
"license_type": "no_license",
"max_line_length": 59,
"num_lines": 11,
"path": "/makefile",
"repo_name": "juanclopezr/JuanLopez_Ej1",
"src_encoding": "UTF-8",
"text": "placas.pdf : grafica.py potential.dat fieldx.dat fieldy.dat\n\tpython grafica.py\n\npotential.dat fieldx.dat fieldy.dat: a.out\n\tmpiexec -n 8 ./a.out\n\na.out : placas.c\n\tmpicc placas.c\n\nclean :\n\trm -f potential.dat fieldx.dat fieldy.dat a.out\n"
}
] | 2 |
GustavoRequiez/module_odoo11_01
|
https://github.com/GustavoRequiez/module_odoo11_01
|
ca057564b6baac43ffa1eedbac621857e2575b8e
|
fb98b4bd64951d2f74d256236f76a818c26ee8ae
|
af484c83f4a93e9ae82b86ea50b6d1e55fcb9f5f
|
refs/heads/master
| 2020-03-15T06:23:07.678246 | 2018-05-10T17:52:45 | 2018-05-10T17:52:45 | 132,006,489 | 0 | 0 | null | null | null | null | null |
[
{
"alpha_fraction": 0.7162790894508362,
"alphanum_fraction": 0.7162790894508362,
"avg_line_length": 25.75,
"blob_id": "aba7465c5ea253f1e9b1d70aa2b56f8d5159cddf",
"content_id": "5003ec1e2529990a1b7746922e1bbb515eded4a7",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 215,
"license_type": "no_license",
"max_line_length": 46,
"num_lines": 8,
"path": "/models/product.py",
"repo_name": "GustavoRequiez/module_odoo11_01",
"src_encoding": "UTF-8",
"text": "from openerp import api, fields, models\n\nclass Product(models.Model):\n\t_name ='product'\n\t\n\tnombre = fields.Char('Nombre', required=True)\n\tstate = fields.Boolean('State', default=True)\n\tnotes = fields.Char('Notes')\n\t"
},
{
"alpha_fraction": 0.42236024141311646,
"alphanum_fraction": 0.43478259444236755,
"avg_line_length": 15.199999809265137,
"blob_id": "94ea32900bc1f3d8ea746134307dd84b8a696777",
"content_id": "a4e254261e99c27954873e30ec6d38c9b79ab7be",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 161,
"license_type": "no_license",
"max_line_length": 28,
"num_lines": 10,
"path": "/__openerp__.py",
"repo_name": "GustavoRequiez/module_odoo11_01",
"src_encoding": "UTF-8",
"text": "{\n 'name': 'First_module',\n 'version': '1.0',\n 'author': 'Gustavo',\n 'depends': ['product'\n ],\n 'data': [\n 'views/product.xml',\n ],\n}"
}
] | 2 |
manumuc/python
|
https://github.com/manumuc/python
|
e7951a9916928b83a49b5f6e4779b9199656bae4
|
ca0f959ea1cb0490b48b6c87b7ca00e02a93255f
|
e3dc3020da59d437a9bbcd1a319c76f432060bb4
|
refs/heads/master
| 2020-04-17T14:51:44.720493 | 2019-09-22T13:27:56 | 2019-09-22T13:27:56 | 166,674,746 | 0 | 0 | null | null | null | null | null |
[
{
"alpha_fraction": 0.49868765473365784,
"alphanum_fraction": 0.5498687624931335,
"avg_line_length": 25.172412872314453,
"blob_id": "5fce5b6d0c2cc191179dc6db7508ad0d60ab7b75",
"content_id": "616fc3a829f71d535ad1f1c4fc5b249b85cdda53",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 762,
"license_type": "no_license",
"max_line_length": 73,
"num_lines": 29,
"path": "/datatyp-examples.py",
"repo_name": "manumuc/python",
"src_encoding": "UTF-8",
"text": "\n\nlist = ['Booboo', 10, 'Orestis', 5, 'Homer', 7, 'Asterix', 9] \nlist...#Output:...['Booboo', 10, 'Orestis', 5, 'Homer', 7, 'Asterix', 9] \n\ndict = {'Booboo':10, 'Orestis': 5, 'Hommer': 7, 'Asterix': 9} \ndict\n#Output:...{'Asterix': 9, 'Hommer': 7, 'Orestis': 5, 'Booboo': 10} \ndict['Hommer']\n#Output:...7 \ndict.keys()\n#Output:...['Asterix', 'Hommer', 'Orestis', 'Booboo'] \ndict.values()\n#Output:...[9, 7, 5, 10] \nmath.fsum(dict.values()) / len(dict)\n#Output:...7.75 \n\n# ------------ \n\n#!/usr/python \n\nImport sys\n# the sys module helps to handle the user args \n\nIf len(sys.argv) < 3: \n Sys.exit( '\\n' + 'Usage' + sys.argv[0] + ' <num1> <num2> + '\\n') \n\nnum1 = int(sys.argv[1]) \nnum2 = int(sys.argv[2]) \nthe_sum = num1 + num2 \nprint '\\n' + str(the_sum) + '\\n' \n"
},
{
"alpha_fraction": 0.66847825050354,
"alphanum_fraction": 0.6847826242446899,
"avg_line_length": 27.30769157409668,
"blob_id": "8bddb723dd02c0e4cb03565681ee373bffbe51a6",
"content_id": "4671959fb7c3350aea0281dfbbd274792b985d26",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 368,
"license_type": "no_license",
"max_line_length": 156,
"num_lines": 13,
"path": "/autodepl-lamp-co7-freebsd10.py",
"repo_name": "manumuc/python",
"src_encoding": "UTF-8",
"text": "# Purpose: \n# deploy and full Lamp ( Apache /MariaDB server /Php) stack installation from your laptop or local computer to your Centos 7 or FreeBSD 10.x server. \n# Result:\n# LAmP installed on Centos 7 or FreeBSD \n# Ingredense \n# Admin PC \n# Destination Server (puppetnode1) \n# Destination Server (puppetnode2) \n# Time to do \n\n# Import: \n# Os \n# sys\n"
},
{
"alpha_fraction": 0.5784615278244019,
"alphanum_fraction": 0.6061538457870483,
"avg_line_length": 35.11111068725586,
"blob_id": "0dbb57fdc657236c7ebe723ab4b4572a6bc50062",
"content_id": "20d1bf450eb07e7badcbfc8c5a1709b71078743b",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 650,
"license_type": "no_license",
"max_line_length": 89,
"num_lines": 18,
"path": "/icap-client-options.py",
"repo_name": "manumuc/python",
"src_encoding": "UTF-8",
"text": "# http://icap-server.sourceforge.net/#_TOC22\ndef icap_request(ip):\n s = socket.socket(socket.AF_INET, socket.SOCK_STREAM)\n s.connect((ip, 1344))\n # \"icap://\" is what makes ICAP not quite HTTP\n s.send(bytes(\"OPTIONS icap://{}/symcscanreq-av-url ICAP/1.0\\n\\n\".format(ip), 'UTF-8'))\n data = s.recv(1024)\n s.close()\n \n # Now I have the data, but let's make it easier to work with\n data_dict = {}\n data_str = data.decode('UTF-8').split('\\r\\n')[2:-2]\n for line in data_str:\n split_line = line.split(':')\n key = split_line[0]\n value = ''.join(split_line[1:]).strip()\n data_dict[key] = value\n return data_dict\n"
},
{
"alpha_fraction": 0.7125193476676941,
"alphanum_fraction": 0.7156105041503906,
"avg_line_length": 28.409090042114258,
"blob_id": "b92d85d303a6eb26b2b6b621008e3e5c4017c0ab",
"content_id": "517c165c9b8d2b99c3f0b29055a0e33230fcf428",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 647,
"license_type": "no_license",
"max_line_length": 72,
"num_lines": 22,
"path": "/authentication/generic_comp.py",
"repo_name": "manumuc/python",
"src_encoding": "UTF-8",
"text": "# python3\n# idea from https://automatetheboringstuff.com/chapter0/\n# compares an input with the content of a file\n# usage: \n# generic_comp\n# no command line arguments\n# instead of clipboard if there is no agument the clipboard will be used\n# \n# Constants\nc_pwd_folder = '.'\nc_pwd_fn: 'SecretPasswordFile.txt'\n\no_password_file = open(c_pwd_folder + c_pwd_fn)\nsecretPassword = passwordFile.read()\nprint('Enter your password.')\ntypedPassword = input()\nif typedPassword == secretPassword:\n print('Access granted')\n if typedPassword == '12345':\n print('That password is one that an idiot puts on their luggage.')\n else:\n print('Access denied')\n"
},
{
"alpha_fraction": 0.6645669341087341,
"alphanum_fraction": 0.6713910698890686,
"avg_line_length": 31.827587127685547,
"blob_id": "b17cb584cf6c78c34f6e1c7b78670fb90a9b58dd",
"content_id": "6beaaabcababb990c5678ae874c2375492e3683d",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 1905,
"license_type": "no_license",
"max_line_length": 134,
"num_lines": 58,
"path": "/rock-paper-scissors-lizard-spock.py",
"repo_name": "manumuc/python",
"src_encoding": "UTF-8",
"text": "<pre>[cc escaped=\"true\" lang=\"python\"] \n# source: https://www.unixmen.com/rock-paper-scissors-lizard-spock-python/ \n#Include 'randrange' function (instead of the whole 'random' module \n\nfrom random import randrange \n\n# Setup a dictionary data structure (working with pairs efficientlyconverter = ['rock':0,'Spock':1,'paper':2,'lizard':3,'scissors':4] \n# retrieve the names (aka key) of the given number (aka value) \n\ndef number_to_name(number): \n If (number in converter.values()): \n Return converter.keys()[number] \n else: \n print ('Error: There is no \"' + str(number) + '\" in ' + str(converter.values()) + '\\n') \n\n# retrieve the number (aka value) of the given names (aka key) \ndef name_to_number(name): \n If (name in converter.keys()): \n Return converter[name] \n else: \n print ('Error: There is no \"' + name + '\" in ' + str(converter.keys()) + '\\n') \n\ndef rpsls(name): \n player_number = name_to_number(name) \n # converts name to player_number using name_to_number \n comp_number = randrange(0,5) \n # compute random guess for comp_number using random.randrange() \n result = (player_number - comp_number) % 5 \n # compute difference of player_number and comp_number modulo five \n# Announce the opponents to each other \n print 'Player chooses ' + name \n print 'Computer chooses ' + number_to_name(comp_number) \n\n# Setup the game's rules \n win = result == 1 or result == 2 \n lose = result == 3 or result == 4 \n\n# Determine and print the results \n if win: \nprint 'Player wins!\\n' \n elif lose: \n print 'Computer wins!\\n' \n else: \n print 'Player and computer tie!\\n' \n\n# Main Program -- Test my code \n rpsls(\"rock\") \n rpsls(\"Spock\") \n rpsls(\"paper\") \n rpsls(\"lizard\") \n rpsls(\"scissors\") \n\n# Check my Helper Function reliability in case of wrong input \n #number_to_name(6) \n# Error in case of wrong number \n #name_to_number('Rock') \n# Error in case of wrong name \n [/cc]</pre> \n"
},
{
"alpha_fraction": 0.7175188660621643,
"alphanum_fraction": 0.7267392873764038,
"avg_line_length": 24.36170196533203,
"blob_id": "efc758bb39d6d963368b1145913cb455f1b7a31e",
"content_id": "168c31990262065b7a51850fdbdc7a4b14ded67f",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 1197,
"license_type": "no_license",
"max_line_length": 78,
"num_lines": 47,
"path": "/tcp-srvr-and-client.py",
"repo_name": "manumuc/python",
"src_encoding": "UTF-8",
"text": "Usage of script: python tcp_server.py> \nUsage of script: python tcp_client.py> \n\n#/usr/bin/python \n# Server for TCP -Client – save as tcp-server.py \n\nImport socket # \n\n# socket_stream for constant connection between the client and the server \nmy_socket_stream = socket.socket(socket.AF_INET, socket.SOCK_STREAM) \n\n# set the hostname \nhostname='localhost' \n\n# set the port number to listen to \nport_number='113' \n\n# set up and start th TCP listen \nmy_socket.bind((hostname,port_number)) \nmy_socket.listen(5) \n\nwhile true: \n # wait infinitely for a client to connect \n # connection and address are new variables \n Connection, address = my_socket.accept() \n\n # after we accept the connection we inform the client that it is connected \n connection.send('You are connected') \n\n\n#/usr/bin/python \n# Client for TCP Server – save as tcp-client.py \n\nImport socket # \n\n# create a socket object and assign it to a variable \nclient_socket = socket.socket(socket.AF_INET, socket.SOCK_STREAM) \n\n# set the servername \nserver_name='localhost' \n\n# set the port number to connect to \nport_number='113' \n\nclient_socket.connect((server_name,port_name)) \nData = client.socket.recv(1024) \nPrint data \n"
},
{
"alpha_fraction": 0.7167986035346985,
"alphanum_fraction": 0.7414247989654541,
"avg_line_length": 39.60714340209961,
"blob_id": "74ae7e2051270e242b4c451b69a41a6ee515cd8a",
"content_id": "5448489f0a12d11af25d1ec0fa681c3ff54451c5",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 1137,
"license_type": "no_license",
"max_line_length": 113,
"num_lines": 28,
"path": "/web/mapme.py",
"repo_name": "manumuc/python",
"src_encoding": "UTF-8",
"text": "# python3\n# idea from https://automatetheboringstuff.com/chapter11/\n# opens Google maps with in the browser using an addressed pasted via clipboard or command line argument\n# usage: \n# mapme 870 Valencia St, San Francisco, CA 94110\n# mapme\n# use command line arguments instead of clipboard if there is no agument the clipboard will be used\n# Maps web site: i.e. https://www.google.com/maps/place/Parkring+29,+85748+Garching\n# Maps web site: https://www.google.com/maps/place/<your-addr-atr>\n# Maps web site: https://www.google.com/maps/place/Garching+85748+Parkring+29\n\nimport sys, webbrowser\n\n# constant values\ngoogle_maps_url = 'http://maps.google.com/maps/place/'\n\nif len(sys.argv) > 1:\n # Get address from command line.\n # you can do \"print (sys.argv) variable to see all the information\n lookup_address = ' '.join(sys.argv[1:]) # with the join argument all information will be treated as one string\nelse:\n # get address from clipboard\n lookup_address = pyperclip.paste()\n \n# Get address from cliboard\n# len(sys.argv) if > 1 then command line argument is available\n\nwebbrowser.open(google_maps_url + lookup_address)\n"
}
] | 7 |
rifkymuth/Tucil3_13519123
|
https://github.com/rifkymuth/Tucil3_13519123
|
353a0b72c8af4b3cb2731c7648471b03dc65a5a8
|
8cffbd2349f87f3fa8cd1e86b17c407ebecb4886
|
aae2c887c4595a76ce81d8e722c3b1a07b71b906
|
refs/heads/main
| 2023-04-05T00:55:47.380934 | 2021-04-06T17:36:14 | 2021-04-06T17:36:14 | 355,248,167 | 0 | 0 | null | null | null | null | null |
[
{
"alpha_fraction": 0.7699005007743835,
"alphanum_fraction": 0.7798507213592529,
"avg_line_length": 43.66666793823242,
"blob_id": "44bca49560a332003f3ee24015f562c95983830f",
"content_id": "1b4585f658d0cda35f1dd89da43293e6f6fbfdcf",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Markdown",
"length_bytes": 804,
"license_type": "no_license",
"max_line_length": 232,
"num_lines": 18,
"path": "/README.md",
"repo_name": "rifkymuth/Tucil3_13519123",
"src_encoding": "UTF-8",
"text": "# AStar Path Finder\nAstar Path Finder is a simple program that show the shortest route from input map. Shortest route is generated using a* algorithm. The program is build using python and jupyter notebook to print the graph representing the input map.\n\n## Prerequisites\n- python v3.8.x\n- jupyter notebook\n`pip install jupyter`\n- networkx\n`pip install networkx`\n- matplotlib\n`pip install matplotlib`\n\n## How To Use Program\n1) Clone this repository to local\n2) open jupyter notebook from command line and open the `tucil3.ipynb`\n3) Change path of input file from `config.json`. Change the 'path' value to the desirable map input file\n4) To run the program from jupyter, press 'run' from the jupyter on the first cell\n5) To use again the program, restart kernel and clear output then rerun then first cell\n"
},
{
"alpha_fraction": 0.5198938846588135,
"alphanum_fraction": 0.540229856967926,
"avg_line_length": 25.928571701049805,
"blob_id": "59e5b76508520c3c6cb985d99e6a71354430eafa",
"content_id": "756287dd0e0ca6a70cea6873d459e6947aba8ec3",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 1131,
"license_type": "no_license",
"max_line_length": 56,
"num_lines": 42,
"path": "/FileReader.py",
"repo_name": "rifkymuth/Tucil3_13519123",
"src_encoding": "UTF-8",
"text": "import matplotlib.pyplot as plt\nimport networkx as nx\nimport csv\nimport json\n\n\ndef read_config():\n f = open('config.json', 'r')\n data = json.load(f)\n f.close()\n return data['path']\n\n\ndef create_graph(G):\n path = read_config()\n f = open(path, 'r')\n input_text = f.read().split(\"---\")\n input_line_nodes = input_text[0].strip().split('\\n')\n input_nodes = []\n x_dplc = 0\n y_dplc = 0\n for l in input_line_nodes:\n input_nodes.append(l.split(','))\n firstNode = False\n for l in input_nodes:\n if (not firstNode):\n x_dplc = float(l[2])\n y_dplc = float(l[1])\n firstNode = True\n name = l[0].strip()\n posX = (float(l[2]) - x_dplc) * 100000\n posY = (float(l[1]) - y_dplc) * 100000\n pos = (posX, posY)\n G.add_node(name, pos=pos)\n input_line_edges = input_text[1].strip().split('\\n')\n input_edges = []\n for edge in input_line_edges:\n e = edge.split(',')\n input_edges.append((e[0].strip(), e[1].strip()))\n G.add_edges_from(input_edges)\n nx.set_edge_attributes(G, 'k', 'color')\n f.close()\n"
},
{
"alpha_fraction": 0.49302607774734497,
"alphanum_fraction": 0.5063675045967102,
"avg_line_length": 27.431034088134766,
"blob_id": "aa16dacc277a60fa3cb5c0b007bf766fdec6168d",
"content_id": "a4c7af6f75c01baa0500b542bffe87b30d814d62",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 1649,
"license_type": "no_license",
"max_line_length": 91,
"num_lines": 58,
"path": "/AStar.py",
"repo_name": "rifkymuth/Tucil3_13519123",
"src_encoding": "UTF-8",
"text": "import matplotlib.pyplot as plt\nimport networkx as nx\nimport math\n\n\ndef h(N, a, b):\n return (math.sqrt(math.pow((N[a][0] - N[b][0]), 2) + math.pow((N[a][1] - N[b][1]), 2)))\n\n\ndef g(N, route, a):\n cost = 0.0\n for i in range(len(route)-1):\n cost += h(N, route[i], route[i+1])\n cost += h(N, route[-1], a)\n return cost\n\n\ndef AStar(G, start, goal):\n result = []\n nodes = nx.get_node_attributes(G, 'pos')\n f = h(nodes, start, goal)\n stack = [[start, f]]\n BFS(G, goal, stack, result, nodes)\n return result\n\n\ndef BFS(G, goal, stack, result, nodes):\n if (stack and checkSmallerThanResult(stack, result)):\n currRoute = stack.pop(0)\n route_list = currRoute[0].split(\"-\")\n route_cost = currRoute[1]\n if (route_list[-1] == goal):\n if (not result):\n result.append(currRoute)\n elif (route_cost < result[0][1]):\n result.clear()\n result.append(currRoute)\n else:\n for node in G[route_list[-1]]:\n if (node in [route_list]):\n continue\n else:\n f = g(nodes, route_list, node) + h(nodes, node, goal)\n route_list.append(node)\n stack.append([\"-\".join(route_list), f])\n route_list.pop()\n stack.sort(key=lambda item: item[1])\n BFS(G, goal, stack, result, nodes)\n\n\ndef checkSmallerThanResult(stack, result):\n if (not result):\n return True\n else:\n for item in stack:\n if item[1] < result[0][1]:\n return True\n return False\n"
}
] | 3 |
sameid/quickds
|
https://github.com/sameid/quickds
|
2c6d27dc82d1712ea754516137b276e07c1c6b3c
|
1535f9fb4d6aff009ac09d594da0385f90697a15
|
917561900ed3b4fac50446d0dd4dd77e92b3633d
|
refs/heads/master
| 2020-12-30T09:26:03.561669 | 2014-05-13T22:00:58 | 2014-05-13T22:00:58 | 19,755,583 | 1 | 0 | null | null | null | null | null |
[
{
"alpha_fraction": 0.7075232267379761,
"alphanum_fraction": 0.7379543781280518,
"avg_line_length": 45.31999969482422,
"blob_id": "dfd4ccf6ef712c59d8bf610bbbc7b7fcae8cdfa7",
"content_id": "b287bd2b026ee06762baabb62f2a27448a3a5983",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Markdown",
"length_bytes": 2366,
"license_type": "no_license",
"max_line_length": 284,
"num_lines": 50,
"path": "/README.md",
"repo_name": "sameid/quickds",
"src_encoding": "UTF-8",
"text": "# quickds - Automate Datasource Creation via the Klipfolio API #\r\n\r\nquickds will parse a CSV file with a list of datasource configurations.\r\nThen it will create them on the environment you point to via the configuration\r\nblock.\r\n\r\n## Getting Started ##\r\n\r\nFirst thing to do is configure the script using the config.json file.\r\n\r\nconfig.json\r\n``` javascript\r\n{\r\n\t\"host\": \"https://test-app.klipfolio.com/api/1\",\t//environment where datasources will be created\r\n\t\"user\": \"[email protected]\", //username \r\n\t\"pass\": \"ihaveallthekeys\", //password\r\n\t\"input\": \"input.csv\", //datasource configuration file\r\n}\r\n```\r\n\r\nEnsure your datasource configuration file has the datasource creation information that you want.\r\n- Remember that columns 1 .. 6 (type, name, format, refresh, oauth, query) are manditory and must maintain order.\r\n- Columns 7 .. * can be dynamic and must relate to parameters in the \"query\" column to make any changes to the \"query\"\r\n\r\ninput.csv (can be named anything, so long as you specify it in the config.json file)\r\n```\r\ntype, name, format, refresh, oauth, query, ids, dimension, metrics, filters, start-date, end-date, max-results\r\ngoogle_analytics,ds1,csv,300,bee05039efe0bb0f0bb007afcc116f94,https://www.googleapis.com/analytics/v3/data/ga?ids=*&dimensions=*&metrics=*&filters=*&start-date=*&end-date=*&max-results=*,ga:47757403,ga:visitors,ga:avgTimeOnPage,ga:country==Russia,{date.startOfYear},{date.today},10000\r\ngoogle_analytics,ds2,csv,300,bee05039efe0bb0f0bb007afcc116f94,https://www.googleapis.com/analytics/v3/data/ga?ids=*&dimensions=*&metrics=*&filters=*&start-date=*&end-date=*&max-results=*,ga:47757403,ga:visitors,ga:avgTimeOnPage,ga:country==Russia,{date.startOfYear},{date.today},10000\r\n```\r\n\r\n#### Running the script ####\r\n\r\nOnce you have configured everything. Run the script:\r\n\r\n```\r\n$ python quickds.py\r\n```\r\n\r\nOptionally tell it what file to use on the fly:\r\n\r\n```\r\n$ python quickds.py my_ds_file.csv\r\n```\r\n\r\n## Quirks ##\r\n\r\n- Currently the API doesn't expose anything for OAuth tokens. What this means is that when you specify the publicId of the OAuth token in the input.csv file,\r\nit won't actually use that token, and most likely the report on the creation of that datasource will show an error. For now just specify a publicId of\r\na datasource that is using an OAuth token that you want. (Adding OAuth token API support is on the roadmap)\r\n"
},
{
"alpha_fraction": 0.5243286490440369,
"alphanum_fraction": 0.5283169150352478,
"avg_line_length": 26.278196334838867,
"blob_id": "36078454e593af869735f25d1bb110ed271900ba",
"content_id": "e95ad798bc45c489e46ee407d45994931a191cbe",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 3761,
"license_type": "no_license",
"max_line_length": 128,
"num_lines": 133,
"path": "/quickds.py",
"repo_name": "sameid/quickds",
"src_encoding": "UTF-8",
"text": "import requests\r\nimport json\r\nimport csv\r\nimport sys\r\nimport urlparse\r\nimport urllib\r\n\r\na = sys.argv\r\n\r\n\r\nif len(a) > 2:\r\n print 'Too many arguments provided'\r\n print 'Aborting...'\r\n sys.exit()\r\n\r\nTYPE = 0\r\nNAME = 1\r\nFORMAT = 2\r\nREFRESH = 3\r\nOAUTH = 4\r\nQUERY = 5\r\n\r\nDYNAMIC = 6\r\n\r\nconfig = open('./config.json').read()\r\n_c = json.loads(config) #_c represents configuration\r\n\r\n#################\r\n## util functions specified with _*\r\ninput_file = _c['input']\r\nif len(a) == 2:\r\n input_file = a[1]\r\n\r\ndef _get(req, host=_c['host']):\r\n res = requests.get(host+req, auth=(_c['user'],_c['pass']), verify=False)\r\n return res.json()\r\n\r\ndef _post(req, data, headers, host=_c['host']):\r\n res = requests.post(host+req, auth=(_c['user'],_c['pass']), data=json.dumps(data),headers=headers,verify=False)\r\n return res.json()\r\n\r\ndef _pprint(data):\r\n print ((json.dumps(data, sort_keys=True, indent=2, separators=(',', ': '))))\r\n return;\r\n\r\n##################\r\n\r\nsetup_map = []\r\n\r\ndef parse_url(_query, _key, _value):\r\n y = urlparse.urlparse(_query)\r\n z = urlparse.parse_qsl(y.query)\r\n for n,key in enumerate(z):\r\n if key[0] == _key:\r\n lst = list(key)\r\n lst[1] = _value\r\n key = tuple(lst)\r\n z[n] = key\r\n q = urllib.unquote(urllib.urlencode(z))\r\n new_url = urlparse.ParseResult(scheme=y.scheme, netloc=y.netloc, path=y.path, query=q, fragment=y.fragment, params=y.params)\r\n new_url = urlparse.urlunparse(new_url)\r\n return new_url\r\n\r\ndef build_query(query, data):\r\n for v in data:\r\n key = setup_map[data.index(v)]\r\n value = v\r\n query = parse_url(query, key, value)\r\n return query\r\n\r\ndef create_datasource(data):\r\n m = data[:DYNAMIC]\r\n q = data[DYNAMIC:]\r\n query = build_query(m[QUERY], q)\r\n oauth = m[OAUTH]\r\n oauth_ds = _get(req='/datasources/'+oauth+'?full=true')\r\n try:\r\n props = oauth_ds['data']['properties']\r\n except KeyError:\r\n print 'Invalid Datasource ID provided in OAuth field in '+ input_file\r\n print 'Aborting...'\r\n sys.exit()\r\n\r\n if m[TYPE] == 'google_analytics':\r\n payload_props = {\r\n \"max_pages\":1,\r\n \"endpoint_url\": query,\r\n \"advancedQuery\": query,\r\n \"mode\":\"Advanced\",\r\n \"token_id\":props['token_id'],\r\n \"oauth_provider_id\": props['oauth_provider_id'],\r\n \"oauth_use_header\": props['oauth_use_header'],\r\n \"oauth_user_token\": props['oauth_user_token']\r\n }\r\n elif m[TYPE] == 'facebook':\r\n payload_props = {\r\n \"max_pages\":1,\r\n \"endpoint_url\": query,\r\n \"advancedQuery\": query,\r\n \"mode\":\"Advanced\",\r\n \"token_id\":props['token_id'],\r\n \"oauth_provider_id\": props['oauth_provider_id'],\r\n \"qtype\": props['qtype'],\r\n \"oauth_user_token\": props['oauth_user_token']\r\n }\r\n\r\n payload = {\r\n \"name\":m[NAME],\r\n \"description\": \"-\",\r\n \"format\":m[FORMAT],\r\n \"connector\":m[TYPE],\r\n \"refresh_interval\":int(m[REFRESH]),\r\n \"is_dynamic\":False,\r\n \"properties\":payload_props\r\n }\r\n\r\n headers = { \"Content-Type\": \"application/json\"}\r\n r=_post(req='/datasources', data=payload, headers=headers)\r\n _pprint(r)\r\n\r\ntry:\r\n with open (input_file, 'rt') as f:\r\n reader = csv.reader(f, delimiter=',')\r\n ignore = True\r\n for row in reader:\r\n if not ignore:\r\n create_datasource(row)\r\n else:\r\n setup_map = row[DYNAMIC:]\r\n ignore = False\r\nexcept IOError:\r\n print 'unable to read ' + input_file\r\n print 'Aborting...'\r\n"
}
] | 2 |
FLY-CODE77/EDA-project
|
https://github.com/FLY-CODE77/EDA-project
|
0155a8f6d4aff95d614bf78dd95c9b8676740dfa
|
42643136ce1fe6255b61f4ff79662b1f276be946
|
311353df499670a87a82d0681a322fb8172551de
|
refs/heads/master
| 2023-06-11T20:31:13.134750 | 2021-07-11T17:36:37 | 2021-07-11T17:36:37 | 384,945,093 | 0 | 0 | null | null | null | null | null |
[
{
"alpha_fraction": 0.5853174328804016,
"alphanum_fraction": 0.5992063283920288,
"avg_line_length": 17.66666603088379,
"blob_id": "018fbd5c046a797cc826486ee47bffcd9a19b529",
"content_id": "e53e825cc623692293bd468ff1ad1e58f3a28296",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 504,
"license_type": "no_license",
"max_line_length": 52,
"num_lines": 27,
"path": "/japanese-car-eda/module/dataload.py",
"repo_name": "FLY-CODE77/EDA-project",
"src_encoding": "UTF-8",
"text": "import pymysql\nimport pandas as pd\n\n'''\ndatabase connection module\n'''\ndef login():\n mydb = pymysql.connect(\n user='root',\n passwd='-',\n host='-',\n db='-',\n charset='utf8',\n )\n cursor = mydb.cursor(pymysql.cursors.DictCursor)\n return mydb, cursor\n\n'''\ndata load from database\nif you give qry return dataset\n'''\ndef getdata(qry1):\n mydb, cursor = login()\n cursor.execute(qry1)\n rlt1 = cursor.fetchall()\n df_1 = pd.DataFrame(rlt1)\n return df_1\n"
},
{
"alpha_fraction": 0.5530726313591003,
"alphanum_fraction": 0.5530726313591003,
"avg_line_length": 17,
"blob_id": "37330475f0a3093ae76f355023929caae42036c6",
"content_id": "ef8ace720b5bb80c610eadc0cea710db972f14ca",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 179,
"license_type": "no_license",
"max_line_length": 31,
"num_lines": 10,
"path": "/japanese-car-eda/module/query.py",
"repo_name": "FLY-CODE77/EDA-project",
"src_encoding": "UTF-8",
"text": "'''\nget whole table query generator\n'''\ndef get_table(table):\n qry = (\n \"\"\"\n SELECT * FROM my_db.{};\n \"\"\").format(table)\n print(\"your query is\", qry)\n return qry"
},
{
"alpha_fraction": 0.6077953577041626,
"alphanum_fraction": 0.669914722442627,
"avg_line_length": 24.5625,
"blob_id": "0a00b1ff96edd1c12c68e9f6c6aca5a759d90e2e",
"content_id": "d562502cb953448a36cce11c6e17bf3cbc20dedf",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Markdown",
"length_bytes": 1517,
"license_type": "no_license",
"max_line_length": 169,
"num_lines": 32,
"path": "/japanese-car-eda/README.md",
"repo_name": "FLY-CODE77/EDA-project",
"src_encoding": "UTF-8",
"text": "# 반일 정서시 일본 자동차 판매량 변화 리포트\n\n> 분석 그래프\n<img src='https://img1.daumcdn.net/thumb/R1280x0/?scode=mtistory2&fname=https%3A%2F%2Fblog.kakaocdn.net%2Fdn%2FbfEypq%2Fbtq81TMtAsa%2FFbxZah80RCvjC2lu7QoT31%2Fimg.png'> \n\n---\n- 파란색 라인 : 반일 감정이 일어난 시기 \n - 16년 7월 일본해 표기, 촛불집회 비하 사건\n - 18년 12월 전범기업 이슈\n - 19년 9월 대대적 불매운동\n---\n- 검은색 라인 : 혼다 자동차 판매량이 급감한 시기\n---\n- 초록색 라인 : 도요타 자동차 판매량이 급격한 시기\n---\n> 결론 \n- https://www.hankookilbo.com/News/Read/202001061185366881(뉴스기사 참조)\n- 기존 일본차 판매에 대한 인식은 충성도 높은 고객들이 많아\n- 한일 간 갈등에 일본차의 판매는 큰 영향이 없다고 알고 있었는데\n- *실제 데이터로 분석* 해본 결과\n- 일본 자동차 등록 대수는 반일 감정의 파장이 도달하는 시간이 조금씩 차이가 날 뿐\n- 상당히 연관성이 있다고 판단 됩니다.\n- 또한 이러한 감정으로 인한 판매량 감소는 회복까지 오랜 시간이 걸리는것을 \n- *데이터분석*을 통해 알수 있었습니다\n\n> 제안\n- 여러 매체의 실시간 정보를 수집\n- 이러한 정서를 미리 판단 가능 하다면 광고 전략 및 판매 전략에 \n- 소중한 자료로서 사용할 수 있을 것으로 사료됩니다.\n\n> 데이터 출처\n- 한국수입자동차협회(kaida)\n\n\n\n"
},
{
"alpha_fraction": 0.6114325523376465,
"alphanum_fraction": 0.7432929873466492,
"avg_line_length": 53.83654022216797,
"blob_id": "2af08684c4769df2bd953df7a6758e7cc81c1139",
"content_id": "e865a7db5fc9041ae5e1b0c42c74dd06da064cd4",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Markdown",
"length_bytes": 7235,
"license_type": "no_license",
"max_line_length": 1342,
"num_lines": 104,
"path": "/Korea-movie-eda/README.md",
"repo_name": "FLY-CODE77/EDA-project",
"src_encoding": "UTF-8",
"text": "[](https://www.python.org/) [](https://numpy.org/) [](https://pandas.pydata.org/) [](https://matplotlib.org/) [](https://seaborn.pydata.org/) [](https://plotly.com/) [](https://pyecharts.org/) [](https://jupyter.org/)\n\n #### 역활 분담 : \n > 정솔 : 데이터 전처리, 데이터 시각화\n >\n > 정현석 : 데이터 전처리, 리팩토링\n\n\n<h1><b> KoreaMovie_EDA </b></h1>\n\n- 극장 뿐만 아니라 OTT 서비스에서도 독립/예술 영화 서비스 경쟁 진행 중\n- 이는 지난 십년 동안 한국에서도 독립 영화 시장이 커져갔고 이에 따라 **‘영화’** 라는 단어의 의미 확장이 진행되었다고 생각\n\n- 전체적인 영화 산업에 대한 분석 진행 \n- 추가적으로 일반영화와 독립/예술영화을 기준으로 분류하여 비교 분석\n\n\n<br>\n<h2><b> 데이터 소개 </b></h2>\n<p align=\"center\">\n<img src = 'https://postfiles.pstatic.net/MjAyMTA0MjBfMjcg/MDAxNjE4OTI1MzM1NDQ5.1ycZuuuOgnXxm3xfIgNEObKuHgBdWb_JYLYlTBz_HKMg.hFEzo5R39BQ37r6C5w8VLVGcHn8kMJgPhjHjodPtrQEg.PNG.greatsol98/%EC%8A%A4%ED%81%AC%EB%A6%B0%EC%83%B7_2021-04-18_%EC%98%A4%ED%9B%84_8.01.22.png?type=w966' width='700'></p>\n\n- 2011년 1월 부터 2021년 4월 9일까지 개봉한 전체 영화 일람 데이터 \n\n> **출처 : 한국영화진흥위원회 박스오피스 https://www.kofic.or.kr/kofic/business/infm/introBoxOffice.do**\n\n<br>\n<h2><b> 결론 </b></h2>\n\n<br>\n<p align='center'><img src='https://user-images.githubusercontent.com/72845895/121806283-2e57ef00-cc8a-11eb-96b7-62217f4ab107.png'></p>\n<br>\n\n- 일반 영화가 4개의 작품이 개봉할 때 독립/예술 영화는 1개의 작품만 개봉\n\n<br>\n<p align='center'><img src='https://user-images.githubusercontent.com/72845895/121806270-28620e00-cc8a-11eb-99ba-ed598dbd9df3.png'></p>\n<br>\n\n- 2018년부터 독립/예술 영화의 지속적인 감소세가 보이고 있었으며 2019년에는 예년 수준으로 잠시 회복했으나 2020년 코로나로 인하여 다시 또 하락\n\n<br>\n<p align=\"center\"> <img src='https://user-images.githubusercontent.com/72845895/121806278-2d26c200-cc8a-11eb-9602-8bf4752a0a98.png' width='415'> <img src='https://user-images.githubusercontent.com/72845895/121806287-3021b280-cc8a-11eb-9138-74e6ef904df3.png' width='415'> <img src='https://user-images.githubusercontent.com/72845895/121806279-2dbf5880-cc8a-11eb-94f0-51377412d4a8.png'></p>\n<br>\n\n- 전체 개봉 영화 중에는 청소년 관람불가 작품이 많고 장르적으로는 독립/예술 영화는 다큐멘터리, 일반 영화는 멜로/로맨스 작품이 가장 많이 개봉\n- 특이하게 일반영화에서는 성인물 작품 수가 많음 \n\n<br>\n<p align=\"center\"> <img src='https://user-images.githubusercontent.com/72845895/121806296-32840c80-cc8a-11eb-9013-0de07d06b5c4.png' width='415'> <img src='https://user-images.githubusercontent.com/72845895/121806292-31eb7600-cc8a-11eb-9b54-6805641d530b.png' width='415'> <img src='https://user-images.githubusercontent.com/72845895/121806285-2ef08580-cc8a-11eb-81b0-fa6eba5b6d7f.png'></p>\n<br>\n\n\n- 매출액이 높은 영화의 장르는 드라마, 범죄, 사극 순으로 많이 분포하였고 12세와 15세이상 관람가 작품이 많음\n- 그러나 장르와 등급의 상관관계에서는 액션과 15세이상 관람가, 액션과 12세이상 관람가, 애니메이션과 전체관람가가 가장 유의미하게 상관이 있다는 결과가 나옴\n\n<br>\n<p align='center'> <img src='https://user-images.githubusercontent.com/72845895/121806277-2c8e2b80-cc8a-11eb-946e-c3a80edf4c2a.png'></p>\n<br>\n\n- 독립/예술 영화의 서울 관객수와 전국 관객수는 크게 차이가 나지 않음\n\n<br>\n<p align=\"center\"> <img width=\"415\" height='300' alt=\"rosechart1\" src=\"https://user-images.githubusercontent.com/72845895/121806288-30ba4900-cc8a-11eb-9c0d-3ca90876e5e8.png\"><img width=\"415\" height='300' alt=\"rosechart2\" src=\"https://user-images.githubusercontent.com/72845895/121806291-3152df80-cc8a-11eb-89ea-fdbf9162fb45.png\"></p>\n<br>\n\n\n- 유독 멜로/로맨스 작품에 청소년 관람불가 작품이 많았던 이유는 실제적으로는 성인물(에로) 작품이 멜로/로맨스 장르로 구분되어 있기 때문\n- 이런 영화들은 주로 스크린수가 1개로 잡혀있는데 이것을 “형식 개봉 영화” 라고 하고 한국과 일본 국적을 가진 영화가 가장 많음\n\n<br>\n<p>\n<img src='https://user-images.githubusercontent.com/72845895/121806299-33b53980-cc8a-11eb-87ae-7c3280329bce.png' width='415'><img src='https://user-images.githubusercontent.com/72845895/121806298-331ca300-cc8a-11eb-80b9-b2668d59aeb6.png' width='415'></p>\n<br>\n\n\n- 국내 독립/예술 영화는 관객들에게 감성적으로 어필\n\n\n\n<br>\n<h2><b> 과정 </b></h2>\n\n- 모듈\n> - rate.py : 영화 등급 분류\n\n<img src ='https://postfiles.pstatic.net/MjAyMTA0MjBfNTcg/MDAxNjE4OTI2Mjc2NDk4.CPVi8k_VRC_K296mTCFMcKwAMhext4GEvAlRdU3htrYg.NwslRAo0A0qvIY95SXM2K8sStzjlSEgyOveNnJH2qwsg.PNG.greatsol98/rate.png?type=w966'>\n\n> - wordcount.py : 가장 많이 사용된 20개의 단어 추출\n\n<img src = 'https://postfiles.pstatic.net/MjAyMTA0MjBfNzMg/MDAxNjE4OTI2Mjc2MjEy.jWPEqqvz9mkC03BdX4e4MGq6EWhqJOeYXHYjK-cVWUcg.1drQ_DQny32hBPFGb4Xq0xQBbGmKKbQwXP0KyTHr13Eg.PNG.greatsol98/wordcount.png?type=w966'>\n\n<br>\n<h2><b> 개선사항 </b></h2>\n\n- 아웃라이어 제거 후 EDA 진행\n\n<br>\n<h2><b> 참고 </b></h2>\n\n- <독립 영화에 대한 현실적인 고민> 부산일보 : http://weekly.pusan.ac.kr/news/articleView.html?idxno=10499\n- <절반으로 뚝 떨어진 독립영화 관객... 한국영화의 '위기'> 오마이뉴스 : http://star.ohmynews.com/NWS_Web/OhmyStar/at_pg.aspx?CNTN_CD=A0002512932&CMPT_CD=P0010&utm_source=naver&utm_medium=newsearch&utm_campaign=naver_news\n- <서울 사람들이 지방 사람들보다 외국영화를 더 좋아한다고?> 경향신문 : https://www.khan.co.kr/culture/movie/article/201310141616491#csidx8d78a29a592e1c4bdd413f232e29a8e\n- <영화 제목으로 본 영한 번역기법 연구: 영화 산업 정책과 언어 정책의 변화를 중심으로> 권유진, 2020\n"
},
{
"alpha_fraction": 0.5851449370384216,
"alphanum_fraction": 0.5960144996643066,
"avg_line_length": 29.72222137451172,
"blob_id": "c5bb5a51f09782d111f94806ae5dcf0a8998aaa9",
"content_id": "da7d872c0ff900348756141d2456a2e418c4b4d4",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 552,
"license_type": "no_license",
"max_line_length": 61,
"num_lines": 18,
"path": "/japanese-car-eda/module/df_calc.py",
"repo_name": "FLY-CODE77/EDA-project",
"src_encoding": "UTF-8",
"text": "import pandas as pd\ndef monthly_sub(df):\n '''\n sub last month - this month car sales value\n return top 5 increasement and decreasement sales months \n and values \n '''\n\n df = df.sort_values(by='date')\n new_df = df[1:]\n new_df['before'] = df['value'][:-1].values\n new_df['sub'] = -(new_df['before'] - new_df['value'])\n \n good_day = new_df['sub'].sort_values(ascending=False)[:5]\n bad_day = new_df['sub'].sort_values()[:5]\n day_df = pd.concat([good_day, bad_day], 0)\n return day_df\n print('function activated')"
},
{
"alpha_fraction": 0.6413851976394653,
"alphanum_fraction": 0.6681538224220276,
"avg_line_length": 30.1589412689209,
"blob_id": "ff7e1c4a35d8d972cbd3ab970bfc12b2e9684436",
"content_id": "8a6e8666286a50f66944db7dfb89f09cdb5fde03",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 5727,
"license_type": "no_license",
"max_line_length": 112,
"num_lines": 151,
"path": "/Korea-movie-eda/main.py",
"repo_name": "FLY-CODE77/EDA-project",
"src_encoding": "UTF-8",
"text": "# warning 제거 \nimport warnings \nwarnings.filterwarnings(action='ignore')\n\n# 글꼴 설정 \nfrom module.Font import Fontmanager\npath = Fontmanager()\n\n# module \nimport pandas as pd\nimport missingno as msno\nimport datetime\nimport seaborn as sns\nimport numpy as np\nimport matplotlib.pyplot as plt\nimport plotly.express as px\nfrom wordcloud import WordCloud, STOPWORDS\nfrom PIL import Image\nfrom wordcloud import ImageColorGenerator\nfrom pyecharts.charts import Pie\nfrom pyecharts import options as opts\nfrom pyecharts.render import make_snapshot\n\nfrom module.rate import rating\nfrom module.BubbleChart import BubbleChart\nfrom module.ColumnNanInfo import ColumnNanInfo\nfrom module.DeleteVal import DeleteVal\nfrom module.WordCount import WordCount\n\ndf = pd.read_csv('./data/KoreaMovieData.csv', thousands = ',' )\n\n# data column cleaning \ndf.columns = df.columns.str.replace('\\n','')\n\n# index order by \"순번\"\ndf.set_index(\"순번\", inplace = True)\n\n# Filling missing value : \"정보없음\"\ndf = df.fillna(\"정보없음\")\n\n# 서울매출액만 정보 없음 값 0으로 대체\ndf[\"서울매출액\"][df[\"서울매출액\"] == \"정보없음\"] = 0 \ndf[\"서울매출액\"] = df[\"서울매출액\"].astype(int)\n\n# 등급 변경 \nraking_df = []\nfor i in df[\"등급\"]:\n raking_df.append(rating(i))\n \ndf['등급'] = raking_df\n\n# 일반 영화, 독립 영화 나눠주기 \nindiefilm = df[df[\"영화구분\"] == \"독립/예술영화\"]\nfilm = df[df[\"영화구분\"] == \"일반영화\"]\n\nratio = df['영화구분'].value_counts()\nlabels = df['영화구분'].value_counts().index\nwedgeprops = {'width':0.7, 'edgecolor':'w', 'linewidth':5}\ncolors = ['seagreen','mediumpurple']\nexplode = [0, 0.10]\n\n# 독립영화, 일반 영화 전체 개봉 작품수 비교 \nplt.figure(figsize=(15,8))\n\nplt.pie(ratio, labels=labels, autopct='%.0f%%', startangle=180, counterclock=True,\n wedgeprops=wedgeprops, colors=colors, textprops={'fontsize': 20}, explode = explode)\n\nplt.text(1.4, 1, f\"독립영화 작품수 {df['영화구분'].value_counts().values[1]}\", fontsize=15)\nplt.text(1.4, 0.8, f\"일반영화 작품수 {df['영화구분'].value_counts().values[0]}\", fontsize=15)\n\nplt.axis('equal')\nplt.title('영화 구분에 따른 전체 개봉영화 작품수', fontsize=20)\nplt.savefig(\"./images/작품수(영화구분).png\", facecolor='#ffffff')\nplt.show()\n\n# 월별 전체 영화 개봉수\n# 개봉일 정보 없는 데이터들은 drop\ndf_open = df[df[\"개봉일\"] != \"정보없음\"]\n\n# 개봉일에 따른 영화 갯수 파악을 위한 전처리 작업 \ndf_open[\"개봉일\"] = df_open[\"개봉일\"].str.split(\"-\").str[0] + df_open[\"개봉일\"].str.split(\"-\").str[1]\ndf_open[\"개봉일\"] = df_open[\"개봉일\"].astype(int)\ndf_open = df_open[df_open[\"개봉일\"] >= 201101]\n\n# 데이트 타임으로 형 변환 후 그래프화 작업\ndf_open[\"개봉일\"] = pd.to_datetime(df_open[\"개봉일\"], format=\"%Y%m\")\ndf_open1 = df_open[df_open[\"영화구분\"] == \"일반영화\"]\ndf_open2 = df_open[df_open[\"영화구분\"] != \"일반영화\"]\n\n\n# 연도별 전체 영화 개봉작품 수\nplt.figure(figsize=(15,8))\nplt.rc('font',size=20)\n\nplt.plot(df_open1.groupby(df_open1[\"개봉일\"]).size(),color = 'darkkhaki', linewidth=5, linestyle=':', label='일반영화')\nplt.plot(df_open2.groupby(df_open2[\"개봉일\"]).size(),color ='teal', linewidth=5, label='독립/예술영화')\n\nplt.legend(loc=8) \nplt.grid(True, axis='y')\nplt.title(\"월별 전체 영화 개봉작품수\", fontsize=20)\nplt.xlabel('개봉일')\nplt.ylabel('영화 개수')\nplt.vlines([17470], 10,200, linestyle = ':')\nplt.annotate(\"\", xy=(18700,10),xytext=(17470,125),arrowprops={\n 'facecolor':'b', \"edgecolor\":'b','shrink' : 0.1, 'alpha':0.5 })\nplt.savefig(\"./images/개봉작품수(영화구분).png\", facecolor='#ffffff')\nplt.show()\n\n# 영화 등급 분포\n\ndatas_grade_1 = indiefilm[['등급']].groupby('등급').size().reset_index(name=\"작품수\")\ndatas_grade_1 = datas_grade_1[datas_grade_1[\"등급\"] != \"정보없음\"]\n\ndatas_grade_2 = film[['등급']].groupby('등급').size().reset_index(name=\"작품수\")\ndatas_grade_2 = datas_grade_2[datas_grade_2[\"등급\"] != \"정보없음\"]\n\ndatas_grade_1[\"영화구분\"] = \"독립/예술영화\"\ndatas_grade_2[\"영화구분\"] = \"일반영화\"\ndatas = pd.concat([datas_grade_1, datas_grade_2], 0)\n\n# 독립/예술영화, 일반영화 등급 분포 그래프\nplt.figure(figsize=(14,10))\nparameters = {'axes.labelsize': 10,\n 'axes.titlesize': 10,\n \"xtick.labelsize\":10,\n \"ytick.labelsize\":10}\n\nplt.rcParams.update(parameters)\nsns.barplot(x = datas['작품수'], y = datas['등급'], hue = datas[\"영화구분\"], palette=\"rocket_r\")\nplt.title('전체 영화 등급 분포')\nplt.savefig(\"./images/등급분포(영화구분).png\", facecolor='#ffffff')\nplt.show()\n\n# 장르 분포\n# 독립/예술영화, 일반영화 장르 분포 그래프\ndatas_genre_1 = indiefilm[['장르']].groupby('장르').size().reset_index(name=\"count\")\ndatas_genre_2 = film[['장르']].groupby('장르').size().reset_index(name=\"count\")\n\ndatas_genre_1[\"영화구분\"] = \"독립/예술영화\"\ndatas_genre_2[\"영화구분\"] = \"일반영화\"\ndatas_genre = pd.concat([datas_genre_1, datas_genre_2], 0)\n\ninterest = [\"멜로/로맨스\", \"다큐멘터리\", \"애니메이션\", \"액션\", \"공포(호러)\", \"성인물(에로)\"]\ndatas_interest = datas_genre[datas_genre[\"장르\"].isin(interest)]\ndatas_interest_pivot = datas_interest.pivot(index='장르', columns='영화구분', values='count')\n\nsns.set_palette(sns.color_palette('Dark2'))\ndatas_interest_pivot.plot.bar(stacked=True, rot=0, figsize=(20,10))\nplt.title('전체 영화 장르 분포', fontsize=20)\nplt.savefig(\"./images/장르분포.png\", facecolor='#ffffff')\nplt.show();\n\n\n"
},
{
"alpha_fraction": 0.6409148573875427,
"alphanum_fraction": 0.6828462481498718,
"avg_line_length": 31,
"blob_id": "5660cf9fc2a7ddbd9e437b4cbd3f0e396bf4ef1d",
"content_id": "bbb22919740e0e6264e2d821cfccb32a96fb26f2",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 4689,
"license_type": "no_license",
"max_line_length": 84,
"num_lines": 123,
"path": "/japanese-car-eda/main.py",
"repo_name": "FLY-CODE77/EDA-project",
"src_encoding": "UTF-8",
"text": "from module import dataload\nfrom module import query\nfrom module import df_calc\n\nimport pandas as pd\nimport matplotlib.pylab as plt\nfrom datetime import datetime\nimport numpy as np\n\n# # get data from database \n# qry1 = query.get_table(\"TV_daily\")\n# TV_daily = dataload.getdata(qry1)\n\n# qry2 = query.get_table(\"naver_query\")\n# naver_query = dataload.getdata(qry2)\n\nqry3 = query.get_table(\"kaida\")\nkaida = dataload.getdata(qry3)\n\n# kaida datatime frame add\nkaida[\"date\"] = (kaida[\"year\"].map(str) + \"-\" + kaida[\"month\"].str.replace(\"월\", \"\"))\nkaida[\"date\"] = pd.to_datetime(kaida[\"date\"])\nkaida.drop([\"month\", \"year\"], axis=1, inplace=True)\n\n# 16-01 ~ 21-04 \nkaida = kaida[kaida[\"date\"].between(\"2016-01-01 \",\"2021-04-01\")]\n\n# I need int type in value \nkaida[\"value\"] = kaida[\"value\"].str.replace(\",\",\"\")\nkaida[\"value\"] = kaida[\"value\"] .astype(int)\n\n\n# For toyota vs honda \ntoyota_kaida = kaida[kaida[\"brand\"] == \"Toyota\"]\nhonda_kaida = kaida[kaida[\"brand\"] == \"Honda\"]\n\n# toyota sales plot\ntoyota = toyota_kaida.groupby(\"date\").sum()\nhonda = honda_kaida.groupby(\"date\").sum()\n\nplt.figure(figsize=(18,10))\nplt.title('toyota vs honda')\nplt.plot(toyota.index, toyota[\"value\"], label=\"toyota\", marker='h')\nplt.plot(honda.index, honda[\"value\"], label=\"honda\", marker='h')\nplt.axvline(x=datetime(2019, 9, 1), color='b', linestyle='--', linewidth=2)\nplt.axvline(x=datetime(2019, 12, 1), color='r', linestyle='--', linewidth=2)\n\nplt.legend()\nplt.savefig(\"./graph/toyota-honda.png\")\nplt.show()\n\n# 19년도 9월 부터 실질적인 반일 감정 발생\n# 12월달 부터 불매 운동으로 인한 실질적인 파동이 시작 ..\n# 신문 기사 인용\n# https://www.hankookilbo.com/News/Read/202001061185366881\n'''\n한일간의 갈등이 있던 이전에도 일본차의 판매는 큰 영향은 없었다”\n'''\n# 그 동안 한일 갈등에 크게 상관이 없던 일본차가 이번에는 왜 이렇게 급변했을까\n\n'''\n16년 7월 일본해 표기, 촛불집회 비하\n18년 12월 전범기업 이슈\n'''\nplt.figure(figsize=(18,10))\nplt.title('anti-japan')\nplt.plot(toyota.index, toyota[\"value\"], label=\"toyota\", marker='h')\nplt.plot(honda.index, honda[\"value\"], label=\"honda\", marker='h')\nplt.axvline(x=datetime(2016, 11, 1), color='b', linestyle='--', linewidth=2)\nplt.axvline(x=datetime(2018, 12, 1), color='b', linestyle='--', linewidth=2)\nplt.axvline(x=datetime(2019, 9, 1), color='b', linestyle='--', linewidth=2)\nplt.axvline(x=datetime(2019, 12, 1), color='r', linestyle='--', linewidth=2)\nplt.legend()\nplt.savefig(\"./graph/anti-japan.png\")\nplt.show()\n\n# for see advertisement change \n# sub val between last month and this month \n# extract top5 increase month and decreasement month \ntoyota_day = df_calc.monthly_sub(toyota)\nhonda_day = df_calc.monthly_sub(honda)\n\n#bad day of toyota and honda \ntoyota_day_bad = toyota_day[5:]\nhonda_day_bad = honda_day[5:]\n# axvline the 10 up and down days in graph\nplt.figure(figsize=(18,10))\nplt.title('anti-japan-after')\nplt.plot(toyota.index, toyota[\"value\"], label=\"toyota\", marker='h')\nplt.plot(honda.index, honda[\"value\"], label=\"honda\", marker='h')\n\nfor i in range(len(toyota_day_bad.index)):\n plt.axvline(x=toyota_day_bad.index[i], color='g', linestyle='-', linewidth=3)\n\nfor i in range(len(honda_day_bad.index)):\n plt.axvline(x=honda_day_bad.index[i], color='k', linestyle='--', linewidth=3)\n \nplt.axvline(x=datetime(2016, 11, 1), color='b', linestyle='--', linewidth=4)\nplt.axvline(x=datetime(2016, 11, 1), color='b', linestyle='--', linewidth=4)\nplt.axvline(x=datetime(2018, 12, 1), color='b', linestyle='--', linewidth=4)\nplt.axvline(x=datetime(2019, 9, 1), color='b', linestyle='--', linewidth=4)\nplt.axvline(x=datetime(2019, 12, 1), color='r', linestyle='--', linewidth=2)\nplt.legend()\nplt.savefig(\"./graph/anti-japan_after.png\")\nplt.show()\n\n''' \n유의미하다고 느껴지는것은 반일 정서가 생겼을때.. 즉각적으로 자동차 등록댓수가 줄어 들었다.\n'''\n\n# 이번 일본차 eda 결과\n'''\n기존 일본차에 대한 인식은 충성도 있는 고객이 많아서 \n대중들의 감정과 크게 상관 없이 판매되는 양은 일정하다고 여겨졌는데\n\n실제로 데이터를 분석을 해본결과\n한국의 일본 차 등록 대수는 파장이 도달하는 시간이 조금씩 차이가 날뿐\n반일 감정과 상당히 연관성이 있다고 판단이 됩니다.\n또한 이러한 감정으로 인한 판매량 감소는 회복까지 오랜 시간이 걸립니다.\n\n여러 매체나 sns를 미리 실시간으로 정보를 수집하면서, 만약 이러한 정서를 판단을 할 수 있다면,\n광고 전략 및 판매 전략에 상당히 도움이 될 것으로 사료됩니다.\n'''"
}
] | 7 |
IRS-MR/IRS-MR-2019-01-19-IS1PT-GRP-MRCard
|
https://github.com/IRS-MR/IRS-MR-2019-01-19-IS1PT-GRP-MRCard
|
7d1634fe88addd3dcee3798ed6dfa1f0b31a821a
|
a1371b288f638effbc8c772eae8b641f6f064748
|
18414bd5dc2377fe000c4b35de7a5868125089e5
|
refs/heads/master
| 2020-04-28T00:54:54.251588 | 2019-03-10T13:00:28 | 2019-03-10T13:00:28 | 174,833,597 | 0 | 1 | null | 2019-03-10T14:19:14 | 2019-03-10T14:19:09 | 2019-03-10T13:00:35 | null |
[
{
"alpha_fraction": 0.6839378476142883,
"alphanum_fraction": 0.6839378476142883,
"avg_line_length": 41.88888931274414,
"blob_id": "208a18bfeb25673b36680ba4f3cd283ddc4c003e",
"content_id": "f6118bf8ae7e2d53ac3ae932dd61d8770c237bea",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 772,
"license_type": "no_license",
"max_line_length": 82,
"num_lines": 18,
"path": "/SystemCode/Recommender/urls.py",
"repo_name": "IRS-MR/IRS-MR-2019-01-19-IS1PT-GRP-MRCard",
"src_encoding": "UTF-8",
"text": "from django.urls import path\n\nfrom . import views\n\napp_name = 'Recommender'\nurlpatterns = [\n path('', views.welcome, name='welcome'),\n path('eligibility/', views.eligibility, name='eligibility'),\n path('preferences/', views.preferences, name='preferences'),\n path('spending_checkbox/', views.spending_checkbox, name='spending_checkbox'),\n path('spending_amount/', views.spending_amount, name='spending_amount'),\n path('recommendation/', views.recommendation, name='recommendation'),\n path('no_recommendation/', views.no_recommendation, name='no_recommendation'),\n path('test/', views.test, name='test')\n #path('spending/', views.spending, name='spending'),\n #path('bank/', views.bank, name='bank'),\n #path('end/', views.end, name='end')\n]\n"
},
{
"alpha_fraction": 0.6735643148422241,
"alphanum_fraction": 0.7465705871582031,
"avg_line_length": 77.19999694824219,
"blob_id": "821ff4e02ce4e0de33f17ceaeb2fdfd049029f47",
"content_id": "b34db821e307acac5daeee05792f376403d0c7e8",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 8602,
"license_type": "no_license",
"max_line_length": 107,
"num_lines": 110,
"path": "/SystemCode/Recommender/models.py",
"repo_name": "IRS-MR/IRS-MR-2019-01-19-IS1PT-GRP-MRCard",
"src_encoding": "UTF-8",
"text": "from django.db import models\n\n# Create your models here.\n\n#class Person(models.Model):\n# age = models.PositiveSmallIntegerField()\n# citizenship_choices = [(1, 'Singaporean'), (2, 'PR'), (3,'Foreigner')]\n# citizenship = models.CharField(max_length=12, choices=citizenship_choices)\n# annual_income = models.PositiveIntegerField()\n# total_spending_amount = models.PositiveIntegerField()\n\nclass CreditCards(models.Model):\n credit_card_id = models.PositiveSmallIntegerField(primary_key=True)\n credit_card_name = models.CharField(max_length=200, default='Unknown Credit Card Name')\n multiple_levels = models.PositiveSmallIntegerField()\n bank_name = models.CharField(max_length=200, default='Unknown Bank Name')\n card_type = models.CharField(max_length=200, default='Unknown Card Type')\n payment_networks = models.CharField(max_length=200, default='Unknown Payment Network')\n money_smart_link = models.CharField(max_length=200, default='https://www.google.com')\n official_link = models.CharField(max_length=200, default='https://www.google.com')\n age_min = models.CharField(max_length=200, default='0')\n age_max = models.CharField(max_length=200, default='999')\n gender_req = models.CharField(max_length=200, default='M')\n annual_income_singaporean_min = models.CharField(max_length=200, default='30000')\n annual_income_pr_min = models.CharField(max_length=200, default='30000')\n annual_income_foreigner_min = models.CharField(max_length=200, default='30000')\n foreign_currency_transaction_fee = models.CharField(max_length=200, default='0')\n annual_fee = models.CharField(max_length=200, default='0')\n annual_fee_waiver_min_spend = models.CharField(max_length=200, default='0')\n overall_points_cap = models.CharField(max_length=200, default='999999999')\n overall_points_min_spend = models.CharField(max_length=200, default='0')\n contactless_points_multiplier = models.CharField(max_length=200, default='0')\n contactless_points_cap = models.CharField(max_length=200, default='999999999')\n contactless_points_lot = models.CharField(max_length=200, default='1')\n dining_points_multiplier = models.CharField(max_length=200, default='0')\n dining_points_cap = models.CharField(max_length=200, default='999999999')\n dining_points_lot = models.CharField(max_length=200, default='1')\n entertainment_points_multiplier = models.CharField(max_length=200, default='0')\n entertainment_points_cap = models.CharField(max_length=200, default='999999999')\n entertainment_points_lot = models.CharField(max_length=200, default='1')\n foreign_points_multiplier = models.CharField(max_length=200, default='0')\n foreign_points_cap = models.CharField(max_length=200, default='999999999')\n foreign_points_lot = models.CharField(max_length=200, default='1')\n online_shopping_others_points_multiplier = models.CharField(max_length=200, default='0')\n online_shopping_others_points_cap = models.CharField(max_length=200, default='999999999')\n online_shopping_others_points_lot = models.CharField(max_length=200, default='1')\n online_shopping_hotels_and_flight_points_multiplier = models.CharField(max_length=200, default='0')\n online_shopping_hotels_and_flights_points_cap = models.CharField(max_length=200, default='999999999')\n online_shopping_hotels_and_flights_points_lot = models.CharField(max_length=200, default='1')\n retail_shopping_points_multiplier = models.CharField(max_length=200, default='0')\n retail_shopping_points_cap = models.CharField(max_length=200, default='999999999')\n retail_shopping_points_lot = models.CharField(max_length=200, default='1')\n points_to_miles_conversion = models.CharField(max_length=200, default='1')\n overall_cashback_cap = models.CharField(max_length=200, default='999999999')\n overall_cashback_min_spend = models.CharField(max_length=200, default='0')\n cash_cashback = models.CharField(max_length=200, default='0')\n bill_cashback_rate = models.CharField(max_length=200, default='0')\n bill_cashback_cap = models.CharField(max_length=200, default='999999999')\n bill_cashback_min_spend = models.CharField(max_length=200, default='0')\n contactless_cashback_rate = models.CharField(max_length=200, default='0')\n contactless_cashback_cap = models.CharField(max_length=200, default='999999999')\n contactless_cashback_min_spend = models.CharField(max_length=200, default='0')\n dining_cashback_rate = models.CharField(max_length=200, default='0')\n dining_cashback_cap = models.CharField(max_length=200, default='999999999')\n dining_cashback_min_spend = models.CharField(max_length=200, default='0')\n foreign_cashback_rate = models.CharField(max_length=200, default='0')\n foreign_cashback_cap = models.CharField(max_length=200, default='999999999')\n foreign_cashback_min_spend = models.CharField(max_length=200, default='0')\n groceries_overall_cashback_cap = models.CharField(max_length=200, default='999999999')\n groceries_others_cashback_rate = models.CharField(max_length=200, default='0')\n groceries_others_cashback_cap = models.CharField(max_length=200, default='999999999')\n groceries_others_cashback_min_spend = models.CharField(max_length=200, default='0')\n groceries_ntuc_cashback_rate = models.CharField(max_length=200, default='0')\n groceries_ntuc_cashback_cap = models.CharField(max_length=200, default='999999999')\n groceries_ntuc_cashback_min_spend = models.CharField(max_length=200, default='0')\n groceries_sheng_siong_cashback_rate = models.CharField(max_length=200, default='0')\n groceries_sheng_siong_cashback_cap = models.CharField(max_length=200, default='999999999')\n groceries_sheng_siong_cashback_min_spend = models.CharField(max_length=200, default='0')\n groceries_cold_storage_cashback_rate = models.CharField(max_length=200, default='0')\n groceries_cold_storage_cashback_cap = models.CharField(max_length=200, default='999999999')\n groceries_cold_storage_cashback_min_spend = models.CharField(max_length=200, default='0')\n groceries_giant_cashback_rate = models.CharField(max_length=200, default='0')\n groceries_giant_cashback_cap = models.CharField(max_length=200, default='999999999')\n groceries_giant_cashback_min_spend = models.CharField(max_length=200, default='0')\n online_shopping_overall_cashback_cap = models.CharField(max_length=200, default='999999999')\n online_shopping_others_cashback_rate = models.CharField(max_length=200, default='0')\n online_shopping_others_cashback_cap = models.CharField(max_length=200, default='999999999')\n online_shopping_others_cashback_min_spend = models.CharField(max_length=200, default='0')\n online_shopping_hotels_and_flights_cashback_rate = models.CharField(max_length=200, default='0')\n online_shopping_hotels_and_flights_cashback_cap = models.CharField(max_length=200, default='999999999')\n online_shopping_hotels_and_flights_cashback_min_spend = models.CharField(max_length=200, default='0')\n petrol_overal_cashback_cap = models.CharField(max_length=200, default='999999999')\n petrol_others_cashback_rate = models.CharField(max_length=200, default='0')\n petrol_others_cashback_cap = models.CharField(max_length=200, default='999999999')\n petrol_others_cashback_min_spend = models.CharField(max_length=200, default='0')\n petrol_esso_cashback_rate = models.CharField(max_length=200, default='0')\n petrol_esso_cashback_cap = models.CharField(max_length=200, default='999999999')\n petrol_esso_cashback_min_spend = models.CharField(max_length=200, default='0')\n petrol_caltex_cashback_rate = models.CharField(max_length=200, default='0')\n petrol_caltex_cashback_cap = models.CharField(max_length=200, default='999999999')\n petrol_caltex_cashback_min_spend = models.CharField(max_length=200, default='0')\n petrol_shell_cashback_rate = models.CharField(max_length=200, default='0')\n petrol_shell_cashback_cap = models.CharField(max_length=200, default='999999999')\n petrol_shell_cashback_min_spend = models.CharField(max_length=200, default='0')\n retail_shopping_cashback_rate = models.CharField(max_length=200, default='0')\n retail_shopping_cashback_cap = models.CharField(max_length=200, default='999999999')\n retail_shopping_cashback_min_spend = models.CharField(max_length=200, default='0')\n transport_cashback_rate = models.CharField(max_length=200, default='0')\n transport_cashback_cap = models.CharField(max_length=200, default='999999999')\n transport_cashback_min_spend = models.CharField(max_length=200, default='0')\n"
},
{
"alpha_fraction": 0.5483871102333069,
"alphanum_fraction": 0.5857385396957397,
"avg_line_length": 24.60869598388672,
"blob_id": "641ec00e298b3eb1544f58eb2550efaa47a3e56e",
"content_id": "408953178d60b0d9a0f10e6450671e8ec971ce64",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 589,
"license_type": "no_license",
"max_line_length": 87,
"num_lines": 23,
"path": "/SystemCode/Recommender/migrations/0002_auto_20190224_0903.py",
"repo_name": "IRS-MR/IRS-MR-2019-01-19-IS1PT-GRP-MRCard",
"src_encoding": "UTF-8",
"text": "# Generated by Django 2.1.5 on 2019-02-24 09:03\n\nfrom django.db import migrations, models\n\n\nclass Migration(migrations.Migration):\n\n dependencies = [\n ('Recommender', '0001_initial'),\n ]\n\n operations = [\n migrations.AddField(\n model_name='creditcards',\n name='credit_card_name',\n field=models.CharField(default='Unknown Credit Card Name', max_length=200),\n ),\n migrations.AddField(\n model_name='creditcards',\n name='eligible',\n field=models.BooleanField(default=None),\n ),\n ]\n"
},
{
"alpha_fraction": 0.6892502307891846,
"alphanum_fraction": 0.6892502307891846,
"avg_line_length": 29.69444465637207,
"blob_id": "7d8c5cee3f844ea4fcdfd4120fdd483109e7fedc",
"content_id": "7dc42822048243be805c6f5083516758f9a50cb5",
"detected_licenses": [
"Python-2.0"
],
"is_generated": false,
"is_vendor": false,
"language": "reStructuredText",
"length_bytes": 1107,
"license_type": "permissive",
"max_line_length": 80,
"num_lines": 36,
"path": "/SystemCode/venv/MRCard-env/lib/python3.6/site-packages/frozendict-1.2.dist-info/DESCRIPTION.rst",
"repo_name": "IRS-MR/IRS-MR-2019-01-19-IS1PT-GRP-MRCard",
"src_encoding": "UTF-8",
"text": "==========\nfrozendict\n==========\n\n``frozendict`` is an immutable wrapper around dictionaries that implements the\ncomplete mapping interface. It can be used as a drop-in replacement for\ndictionaries where immutability is desired.\n\nOf course, this is ``python``, and you can still poke around the object's\ninternals if you want.\n\nThe ``frozendict`` constructor mimics ``dict``, and all of the expected\ninterfaces (``iter``, ``len``, ``repr``, ``hash``, ``getitem``) are provided.\nNote that a ``frozendict`` does not guarantee the immutability of its values, so\nthe utility of ``hash`` method is restricted by usage.\n\nThe only difference is that the ``copy()`` method of ``frozendict`` takes\nvariable keyword arguments, which will be present as key/value pairs in the new,\nimmutable copy.\n\nExample shell usage:\n\n.. code-block:: python\n\n from frozendict import frozendict\n\n fd = frozendict({ 'hello': 'World' })\n\n print fd\n # <frozendict {'hello': 'World'}>\n\n print fd['hello']\n # 'World'\n\n print fd.copy(another='key/value')\n # <frozendict {'hello': 'World', 'another': 'key/value'}>\n\n\n"
},
{
"alpha_fraction": 0.5290908217430115,
"alphanum_fraction": 0.5623884797096252,
"avg_line_length": 37.079612731933594,
"blob_id": "5c33296cc7553659423c4c7e7e91029cef83f6f9",
"content_id": "42be5ee67e7183d8e461841f434b276e98cf3ebd",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 19611,
"license_type": "no_license",
"max_line_length": 97,
"num_lines": 515,
"path": "/SystemCode/Recommender/migrations/0004_auto_20190302_1625.py",
"repo_name": "IRS-MR/IRS-MR-2019-01-19-IS1PT-GRP-MRCard",
"src_encoding": "UTF-8",
"text": "# Generated by Django 2.1.5 on 2019-03-02 16:25\n\nfrom django.db import migrations, models\n\n\nclass Migration(migrations.Migration):\n\n dependencies = [\n ('Recommender', '0003_auto_20190224_1116'),\n ]\n\n operations = [\n migrations.DeleteModel(\n name='Person',\n ),\n migrations.RemoveField(\n model_name='creditcards',\n name='eligible',\n ),\n migrations.RemoveField(\n model_name='creditcards',\n name='id',\n ),\n migrations.RemoveField(\n model_name='creditcards',\n name='total_spending_amount_min',\n ),\n migrations.AddField(\n model_name='creditcards',\n name='annual_fee',\n field=models.CharField(default='0', max_length=200),\n ),\n migrations.AddField(\n model_name='creditcards',\n name='annual_fee_waiver_min_spend',\n field=models.CharField(default='0', max_length=200),\n ),\n migrations.AddField(\n model_name='creditcards',\n name='bank_name',\n field=models.CharField(default='Unknown Bank Name', max_length=200),\n ),\n migrations.AddField(\n model_name='creditcards',\n name='bill_cashback_cap',\n field=models.CharField(default='999999999', max_length=200),\n ),\n migrations.AddField(\n model_name='creditcards',\n name='bill_cashback_min_spend',\n field=models.CharField(default='0', max_length=200),\n ),\n migrations.AddField(\n model_name='creditcards',\n name='bill_cashback_rate',\n field=models.CharField(default='0', max_length=200),\n ),\n migrations.AddField(\n model_name='creditcards',\n name='card_type',\n field=models.CharField(default='Unknown Card Type', max_length=200),\n ),\n migrations.AddField(\n model_name='creditcards',\n name='cash_cashback',\n field=models.CharField(default='0', max_length=200),\n ),\n migrations.AddField(\n model_name='creditcards',\n name='contactless_cashback_cap',\n field=models.CharField(default='999999999', max_length=200),\n ),\n migrations.AddField(\n model_name='creditcards',\n name='contactless_cashback_min_spend',\n field=models.CharField(default='0', max_length=200),\n ),\n migrations.AddField(\n model_name='creditcards',\n name='contactless_cashback_rate',\n field=models.CharField(default='0', max_length=200),\n ),\n migrations.AddField(\n model_name='creditcards',\n name='contactless_points_cap',\n field=models.CharField(default='999999999', max_length=200),\n ),\n migrations.AddField(\n model_name='creditcards',\n name='contactless_points_lot',\n field=models.CharField(default='1', max_length=200),\n ),\n migrations.AddField(\n model_name='creditcards',\n name='contactless_points_multiplier',\n field=models.CharField(default='0', max_length=200),\n ),\n migrations.AddField(\n model_name='creditcards',\n name='credit_card_id',\n field=models.PositiveSmallIntegerField(default=0, primary_key=True, serialize=False),\n preserve_default=False,\n ),\n migrations.AddField(\n model_name='creditcards',\n name='dining_cashback_cap',\n field=models.CharField(default='999999999', max_length=200),\n ),\n migrations.AddField(\n model_name='creditcards',\n name='dining_cashback_min_spend',\n field=models.CharField(default='0', max_length=200),\n ),\n migrations.AddField(\n model_name='creditcards',\n name='dining_cashback_rate',\n field=models.CharField(default='0', max_length=200),\n ),\n migrations.AddField(\n model_name='creditcards',\n name='dining_points_cap',\n field=models.CharField(default='999999999', max_length=200),\n ),\n migrations.AddField(\n model_name='creditcards',\n name='dining_points_lot',\n field=models.CharField(default='1', max_length=200),\n ),\n migrations.AddField(\n model_name='creditcards',\n name='dining_points_multiplier',\n field=models.CharField(default='0', max_length=200),\n ),\n migrations.AddField(\n model_name='creditcards',\n name='entertainment_points_cap',\n field=models.CharField(default='999999999', max_length=200),\n ),\n migrations.AddField(\n model_name='creditcards',\n name='entertainment_points_lot',\n field=models.CharField(default='1', max_length=200),\n ),\n migrations.AddField(\n model_name='creditcards',\n name='entertainment_points_multiplier',\n field=models.CharField(default='0', max_length=200),\n ),\n migrations.AddField(\n model_name='creditcards',\n name='foreign_cashback_cap',\n field=models.CharField(default='999999999', max_length=200),\n ),\n migrations.AddField(\n model_name='creditcards',\n name='foreign_cashback_min_spend',\n field=models.CharField(default='0', max_length=200),\n ),\n migrations.AddField(\n model_name='creditcards',\n name='foreign_cashback_rate',\n field=models.CharField(default='0', max_length=200),\n ),\n migrations.AddField(\n model_name='creditcards',\n name='foreign_currency_transaction_fee',\n field=models.CharField(default='0', max_length=200),\n ),\n migrations.AddField(\n model_name='creditcards',\n name='foreign_points_cap',\n field=models.CharField(default='999999999', max_length=200),\n ),\n migrations.AddField(\n model_name='creditcards',\n name='foreign_points_lot',\n field=models.CharField(default='1', max_length=200),\n ),\n migrations.AddField(\n model_name='creditcards',\n name='foreign_points_multiplier',\n field=models.CharField(default='0', max_length=200),\n ),\n migrations.AddField(\n model_name='creditcards',\n name='gender_req',\n field=models.CharField(default='M', max_length=200),\n ),\n migrations.AddField(\n model_name='creditcards',\n name='groceries_cold_storage_cashback_cap',\n field=models.CharField(default='999999999', max_length=200),\n ),\n migrations.AddField(\n model_name='creditcards',\n name='groceries_cold_storage_cashback_min_spend',\n field=models.CharField(default='0', max_length=200),\n ),\n migrations.AddField(\n model_name='creditcards',\n name='groceries_cold_storage_cashback_rate',\n field=models.CharField(default='0', max_length=200),\n ),\n migrations.AddField(\n model_name='creditcards',\n name='groceries_giant_cashback_cap',\n field=models.CharField(default='999999999', max_length=200),\n ),\n migrations.AddField(\n model_name='creditcards',\n name='groceries_giant_cashback_min_spend',\n field=models.CharField(default='0', max_length=200),\n ),\n migrations.AddField(\n model_name='creditcards',\n name='groceries_giant_cashback_rate',\n field=models.CharField(default='0', max_length=200),\n ),\n migrations.AddField(\n model_name='creditcards',\n name='groceries_ntuc_cashback_cap',\n field=models.CharField(default='999999999', max_length=200),\n ),\n migrations.AddField(\n model_name='creditcards',\n name='groceries_ntuc_cashback_min_spend',\n field=models.CharField(default='0', max_length=200),\n ),\n migrations.AddField(\n model_name='creditcards',\n name='groceries_ntuc_cashback_rate',\n field=models.CharField(default='0', max_length=200),\n ),\n migrations.AddField(\n model_name='creditcards',\n name='groceries_others_cashback_cap',\n field=models.CharField(default='999999999', max_length=200),\n ),\n migrations.AddField(\n model_name='creditcards',\n name='groceries_others_cashback_min_spend',\n field=models.CharField(default='0', max_length=200),\n ),\n migrations.AddField(\n model_name='creditcards',\n name='groceries_others_cashback_rate',\n field=models.CharField(default='0', max_length=200),\n ),\n migrations.AddField(\n model_name='creditcards',\n name='groceries_overall_cashback_cap',\n field=models.CharField(default='999999999', max_length=200),\n ),\n migrations.AddField(\n model_name='creditcards',\n name='groceries_sheng_siong_cashback_cap',\n field=models.CharField(default='999999999', max_length=200),\n ),\n migrations.AddField(\n model_name='creditcards',\n name='groceries_sheng_siong_cashback_min_spend',\n field=models.CharField(default='0', max_length=200),\n ),\n migrations.AddField(\n model_name='creditcards',\n name='groceries_sheng_siong_cashback_rate',\n field=models.CharField(default='0', max_length=200),\n ),\n migrations.AddField(\n model_name='creditcards',\n name='money_smart_link',\n field=models.CharField(default='https://www.google.com', max_length=200),\n ),\n migrations.AddField(\n model_name='creditcards',\n name='multiple_levels',\n field=models.PositiveSmallIntegerField(default=0),\n preserve_default=False,\n ),\n migrations.AddField(\n model_name='creditcards',\n name='official_link',\n field=models.CharField(default='https://www.google.com', max_length=200),\n ),\n migrations.AddField(\n model_name='creditcards',\n name='online_shopping_hotels_and_flight_points_multiplier',\n field=models.CharField(default='0', max_length=200),\n ),\n migrations.AddField(\n model_name='creditcards',\n name='online_shopping_hotels_and_flights_cashback_cap',\n field=models.CharField(default='999999999', max_length=200),\n ),\n migrations.AddField(\n model_name='creditcards',\n name='online_shopping_hotels_and_flights_cashback_min_spend',\n field=models.CharField(default='0', max_length=200),\n ),\n migrations.AddField(\n model_name='creditcards',\n name='online_shopping_hotels_and_flights_cashback_rate',\n field=models.CharField(default='0', max_length=200),\n ),\n migrations.AddField(\n model_name='creditcards',\n name='online_shopping_hotels_and_flights_points_cap',\n field=models.CharField(default='999999999', max_length=200),\n ),\n migrations.AddField(\n model_name='creditcards',\n name='online_shopping_hotels_and_flights_points_lot',\n field=models.CharField(default='1', max_length=200),\n ),\n migrations.AddField(\n model_name='creditcards',\n name='online_shopping_others_cashback_cap',\n field=models.CharField(default='999999999', max_length=200),\n ),\n migrations.AddField(\n model_name='creditcards',\n name='online_shopping_others_cashback_min_spend',\n field=models.CharField(default='0', max_length=200),\n ),\n migrations.AddField(\n model_name='creditcards',\n name='online_shopping_others_cashback_rate',\n field=models.CharField(default='0', max_length=200),\n ),\n migrations.AddField(\n model_name='creditcards',\n name='online_shopping_others_points_cap',\n field=models.CharField(default='999999999', max_length=200),\n ),\n migrations.AddField(\n model_name='creditcards',\n name='online_shopping_others_points_lot',\n field=models.CharField(default='1', max_length=200),\n ),\n migrations.AddField(\n model_name='creditcards',\n name='online_shopping_others_points_multiplier',\n field=models.CharField(default='0', max_length=200),\n ),\n migrations.AddField(\n model_name='creditcards',\n name='online_shopping_overall_cashback_cap',\n field=models.CharField(default='999999999', max_length=200),\n ),\n migrations.AddField(\n model_name='creditcards',\n name='overall_cashback_cap',\n field=models.CharField(default='999999999', max_length=200),\n ),\n migrations.AddField(\n model_name='creditcards',\n name='overall_cashback_min_spend',\n field=models.CharField(default='0', max_length=200),\n ),\n migrations.AddField(\n model_name='creditcards',\n name='overall_points_cap',\n field=models.CharField(default='999999999', max_length=200),\n ),\n migrations.AddField(\n model_name='creditcards',\n name='overall_points_min_spend',\n field=models.CharField(default='0', max_length=200),\n ),\n migrations.AddField(\n model_name='creditcards',\n name='payment_networks',\n field=models.CharField(default='Unknown Payment Network', max_length=200),\n ),\n migrations.AddField(\n model_name='creditcards',\n name='petrol_caltex_cashback_cap',\n field=models.CharField(default='999999999', max_length=200),\n ),\n migrations.AddField(\n model_name='creditcards',\n name='petrol_caltex_cashback_min_spend',\n field=models.CharField(default='0', max_length=200),\n ),\n migrations.AddField(\n model_name='creditcards',\n name='petrol_caltex_cashback_rate',\n field=models.CharField(default='0', max_length=200),\n ),\n migrations.AddField(\n model_name='creditcards',\n name='petrol_esso_cashback_cap',\n field=models.CharField(default='999999999', max_length=200),\n ),\n migrations.AddField(\n model_name='creditcards',\n name='petrol_esso_cashback_min_spend',\n field=models.CharField(default='0', max_length=200),\n ),\n migrations.AddField(\n model_name='creditcards',\n name='petrol_esso_cashback_rate',\n field=models.CharField(default='0', max_length=200),\n ),\n migrations.AddField(\n model_name='creditcards',\n name='petrol_others_cashback_cap',\n field=models.CharField(default='999999999', max_length=200),\n ),\n migrations.AddField(\n model_name='creditcards',\n name='petrol_others_cashback_min_spend',\n field=models.CharField(default='0', max_length=200),\n ),\n migrations.AddField(\n model_name='creditcards',\n name='petrol_others_cashback_rate',\n field=models.CharField(default='0', max_length=200),\n ),\n migrations.AddField(\n model_name='creditcards',\n name='petrol_overal_cashback_cap',\n field=models.CharField(default='999999999', max_length=200),\n ),\n migrations.AddField(\n model_name='creditcards',\n name='petrol_shell_cashback_cap',\n field=models.CharField(default='999999999', max_length=200),\n ),\n migrations.AddField(\n model_name='creditcards',\n name='petrol_shell_cashback_min_spend',\n field=models.CharField(default='0', max_length=200),\n ),\n migrations.AddField(\n model_name='creditcards',\n name='petrol_shell_cashback_rate',\n field=models.CharField(default='0', max_length=200),\n ),\n migrations.AddField(\n model_name='creditcards',\n name='points_to_miles_conversion',\n field=models.CharField(default='1', max_length=200),\n ),\n migrations.AddField(\n model_name='creditcards',\n name='retail_shopping_cashback_cap',\n field=models.CharField(default='999999999', max_length=200),\n ),\n migrations.AddField(\n model_name='creditcards',\n name='retail_shopping_cashback_min_spend',\n field=models.CharField(default='0', max_length=200),\n ),\n migrations.AddField(\n model_name='creditcards',\n name='retail_shopping_cashback_rate',\n field=models.CharField(default='0', max_length=200),\n ),\n migrations.AddField(\n model_name='creditcards',\n name='retail_shopping_points_cap',\n field=models.CharField(default='999999999', max_length=200),\n ),\n migrations.AddField(\n model_name='creditcards',\n name='retail_shopping_points_lot',\n field=models.CharField(default='1', max_length=200),\n ),\n migrations.AddField(\n model_name='creditcards',\n name='retail_shopping_points_multiplier',\n field=models.CharField(default='0', max_length=200),\n ),\n migrations.AddField(\n model_name='creditcards',\n name='transport_cashback_cap',\n field=models.CharField(default='999999999', max_length=200),\n ),\n migrations.AddField(\n model_name='creditcards',\n name='transport_cashback_min_spend',\n field=models.CharField(default='0', max_length=200),\n ),\n migrations.AddField(\n model_name='creditcards',\n name='transport_cashback_rate',\n field=models.CharField(default='0', max_length=200),\n ),\n migrations.AlterField(\n model_name='creditcards',\n name='age_max',\n field=models.CharField(default='999', max_length=200),\n ),\n migrations.AlterField(\n model_name='creditcards',\n name='age_min',\n field=models.CharField(default='0', max_length=200),\n ),\n migrations.AlterField(\n model_name='creditcards',\n name='annual_income_foreigner_min',\n field=models.CharField(default='30000', max_length=200),\n ),\n migrations.AlterField(\n model_name='creditcards',\n name='annual_income_pr_min',\n field=models.CharField(default='30000', max_length=200),\n ),\n migrations.AlterField(\n model_name='creditcards',\n name='annual_income_singaporean_min',\n field=models.CharField(default='30000', max_length=200),\n ),\n ]\n"
},
{
"alpha_fraction": 0.6384615302085876,
"alphanum_fraction": 0.6692307591438293,
"avg_line_length": 25,
"blob_id": "921205dbc56b14a0be58dc15bb4d9b80544755c9",
"content_id": "87f4d61f55d9c663db2673c1bb94dfd94f6119c5",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 130,
"license_type": "no_license",
"max_line_length": 72,
"num_lines": 5,
"path": "/SystemCode/venv/MRCard-env/bin/pydoc3.6",
"repo_name": "IRS-MR/IRS-MR-2019-01-19-IS1PT-GRP-MRCard",
"src_encoding": "UTF-8",
"text": "#!/home/iss-user/MTech19/MRCard/SystemCode/venv/MRCard-env/bin/python3.6\n\nimport pydoc\nif __name__ == '__main__':\n pydoc.cli()\n"
},
{
"alpha_fraction": 0.6620689630508423,
"alphanum_fraction": 0.6896551847457886,
"avg_line_length": 28,
"blob_id": "e3d164f6df7e988eabb9b8f3ccc3f9f7df1e2fec",
"content_id": "230954b714f15664d00f5e44351cfe675db3b4ff",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 145,
"license_type": "no_license",
"max_line_length": 72,
"num_lines": 5,
"path": "/SystemCode/venv/MRCard-env/bin/idle3.6",
"repo_name": "IRS-MR/IRS-MR-2019-01-19-IS1PT-GRP-MRCard",
"src_encoding": "UTF-8",
"text": "#!/home/iss-user/MTech19/MRCard/SystemCode/venv/MRCard-env/bin/python3.6\n\nfrom idlelib.pyshell import main\nif __name__ == '__main__':\n main()\n"
},
{
"alpha_fraction": 0.6645447611808777,
"alphanum_fraction": 0.6669178605079651,
"avg_line_length": 65.41746520996094,
"blob_id": "0933b85af12dee55216873b09c201c20ef20fd4c",
"content_id": "f02d4e57419f6a64ee2b6044dc778db3547e6b02",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 62366,
"license_type": "no_license",
"max_line_length": 233,
"num_lines": 939,
"path": "/SystemCode/Recommender/rules.py",
"repo_name": "IRS-MR/IRS-MR-2019-01-19-IS1PT-GRP-MRCard",
"src_encoding": "UTF-8",
"text": "from random import choice\nfrom pyknow import *\nfrom .fuzzy_logic import *\n\ndef return_eligibile_credit_card_ids(dict_of_personal_info, list_of_dict_of_credit_card_eligibility_info, debug=False):\n\n eligible_card_ids = {'eligible_credit_card_ids':[]}\n\n for row in list_of_dict_of_credit_card_eligibility_info:\n class Person(Fact):\n age = Field(int)\n gender = Field(str)\n citizenship = Field(str)\n annual_income = Field(int)\n total_spending_amount = Field(int)\n pass\n\n class Eligibility(KnowledgeEngine):\n def __init__(self, *args, **kwargs):\n super().__init__(*args, **kwargs)\n self.eligible_status = True\n\n # Applicant is too young\n @Rule(AS.v << Person(data__age = MATCH.data__age),\n TEST(lambda data__age: data__age < age__min), salience=1)\n def too_young(self):\n print(\"Cardid %s, Too young, less than %s\" %(cardid, age__min))\n self.eligible_status = False\n self.halt()\n\n # Applicant is too old\n @Rule(AS.v << Person(data__age = MATCH.data__age),\n TEST(lambda data__age: data__age > age__max), salience=1)\n def too_old(self):\n print(\"Cardid %s, Too old, more than %s\" %(cardid, age__max))\n self.eligible_status = False\n self.halt() \n\n # Applicant is wrong gender\n @Rule(AS.v << Person(data__gender = MATCH.data__gender),\n TEST(lambda data__gender: (data__gender == 'male') & (gender__req == 'F' or gender__req == 'f')))\n def wrong_gender(self):\n print(\"Cardid %s, Wrong gender, this card is only for %s\" %(cardid, gender__req))\n self.eligible_status = False\n self.halt()\n\n # Applicant is Singaporean and spends less than the minimum requirement\n @Rule(AND(\n AS.v << Person(data__citizenship = MATCH.data__citizenship),\n TEST(lambda data__citizenship: data__citizenship == 'singaporean'),\n AS.v << Person(data__annual_income = MATCH.data__annual_income),\n TEST(lambda data__annual_income: data__annual_income < annual_income_singaporean_min), \n ))\n def singaporean_too_poor(self):\n print(\"Cardid %s, Singaporean, does not meet min spending of %s\" %(cardid, annual_income_singaporean_min))\n self.eligible_status = False\n self.halt()\n\n # Applicant is pr and spends less than the minimum requirement\n @Rule(AND(\n AS.v << Person(data__citizenship = MATCH.data__citizenship),\n TEST(lambda data__citizenship: data__citizenship == 'pr'),\n AS.v << Person(data__annual_income = MATCH.data__annual_income),\n TEST(lambda data__annual_income: data__annual_income < annual_income_pr_min), \n ))\n def pr_too_poor(self):\n print(\"Cardid %s, PR, does not meet min spending of %s\" %(cardid, annual_income_pr_min))\n self.eligible_status = False\n self.halt()\n\n # Applicant is Foreigner and spends less than the minimum requirement\n @Rule(AND(\n AS.v << Person(data__citizenship = MATCH.data__citizenship),\n TEST(lambda data__citizenship: data__citizenship == 'foreigner'),\n AS.v << Person(data__annual_income = MATCH.data__annual_income),\n TEST(lambda data__annual_income: data__annual_income < annual_income_foreigner_min), \n ))\n def foreigner_too_poor(self):\n print(\"Cardid %s, Foreigner, does not meet min spending of %s\" %(cardid, annual_income_foreigner_min))\n self.eligible_status = False\n self.halt()\n \n cardid = str(row['credit_card_id'][0])\n credit_card_name = str(row['credit_card_name'][0])\n age__min = row['age_min'][0]\n age__max = row['age_max'][0]\n gender__req = row['gender_req'][0]\n annual_income_singaporean_min = row['annual_income_singaporean_min'][0]\n annual_income_pr_min = row['annual_income_pr_min'][0]\n annual_income_foreigner_min = row['annual_income_foreigner_min'][0]\n if debug:\n print(\"Applicant Info:\", dict_of_personal_info)\n print(\"Card Info: ID is %s, Name is %s, age_min is %f, age_max is %f, gender is %s, annual_income_singaporean_min is %f, annual_income_pr_min is %f, annual_income_foreigner_min is %f\"\n %(cardid, credit_card_name, age__min, age__max, gender__req, annual_income_singaporean_min, annual_income_pr_min, annual_income_foreigner_min))\n engine=Eligibility()\n engine.reset()\n engine.declare(Person(data=dict_of_personal_info))\n engine.run()\n engine.facts\n if debug:\n print(engine.eligible_status)\n print(\"\\n\")\n if engine.eligible_status:\n eligible_card_ids['eligible_credit_card_ids'].append(cardid)\n return eligible_card_ids\n\ndef return_preferred_credit_card_ids(dict_of_preference_info, list_of_dict_of_credit_card_preference_info, debug=False):\n preferred_credit_card_ids = {'preferred_credit_card_ids':[]}\n \n for row in list_of_dict_of_credit_card_preference_info:\n class PreferenceInput(Fact):\n preferred_bank = Field(str)\n preferred_card_type = Field(str)\n preferred_rewards_type = Field(str)\n pass\n\n class Preference(KnowledgeEngine):\n def __init__(self, *args, **kwargs):\n super().__init__(*args, **kwargs)\n self.prefered = None\n # If any part of Credit Card's Bank is within preferred list (if sets intersect)\n # AND if any part of Credit Card's payment network is within preferred paymnent network (if sets intersect)\n # AND if any part of Credit Card reward type is within preferred reward card type (if sets intersect)\n @Rule(AND(AS.v << PreferenceInput(data__preferred_bank = MATCH.data__preferred_bank),\n TEST(lambda data__preferred_bank: bool(set(data__preferred_bank) & set(bank_name))),\n AS.v << PreferenceInput(data__preferred_card_type = MATCH.data__preferred_card_type),\n TEST(lambda data__preferred_card_type: bool(set(data__preferred_card_type) & set(payment_networks))),\n AS.v << PreferenceInput(data__preferred_rewards_type = MATCH.data__preferred_rewards_type),\n TEST(lambda data__preferred_rewards_type: bool(set(data__preferred_rewards_type) & set(card_type))) ))\n def bank_within_preferred_and_in_payment_network(self, v):\n print(\"Cardid %s, %s is a preferred bank and %s payment network intersects and %s rewards type intersects\" %(cardid, bank_name[0], ' '.join(payment_networks), ' '.join(card_type)))\n self.prefered = True\n self.halt()\n \n # Credit Card's Bank is NOT within preferred list\n @Rule(AS.v << PreferenceInput(data__preferred_bank = MATCH.data__preferred_bank),\n TEST(lambda data__preferred_bank: bool(set(data__preferred_bank) & set(bank_name)) == False), salience=10)\n def bank_not_within_preferred(self, v):\n print(\"Cardid %s, %s is not a preferred bank\" %(cardid, bank_name))\n self.prefered = False\n self.halt()\n \n # Credit Card's Bank is NOT within payment network\n @Rule(AS.v << PreferenceInput(data__preferred_card_type = MATCH.data__preferred_card_type),\n TEST(lambda data__preferred_card_type: bool(set(payment_networks) & set(data__preferred_card_type)) == False), salience=10)\n def credit_card_not_in_payment_network(self, v):\n print(\"Cardid %s, %s not within preferred payment network \" %(cardid,' '.join(payment_networks)))\n self.prefered = False\n self.halt()\n \n # Credit Card's Reward Type is NOT within preferred reward type\n @Rule(AS.v << PreferenceInput(data__preferred_rewards_type = MATCH.data__preferred_rewards_type),\n TEST(lambda data__preferred_rewards_type: bool(set(card_type) & set(data__preferred_rewards_type)) == False), salience=10)\n def credit_card_not_in_rewards_type(self, v):\n print(\"Cardid %s, %s not within preferred rewards type \" %(cardid,' '.join(card_type)))\n self.prefered = False\n self.halt()\n \n cardid = str(row['credit_card_id'][0])\n credit_card_name = str(row['credit_card_name'][0])\n bank_name = row['bank_name']\n payment_networks = row['payment_networks']\n card_type = row['card_type']\n \n engine = Preference()\n engine.reset()\n engine.declare(PreferenceInput(data=dict_of_preference_info))\n engine.run()\n if debug: \n print(row)\n print(preference_info)\n print(engine.prefered)\n print(\"\\n\")\n if engine.prefered:\n preferred_credit_card_ids['preferred_credit_card_ids'].append(cardid)\n return preferred_credit_card_ids\n\ndef return_eligible_spendings_for_breakdown(dict_of_spending_checkbox_info, debug=False):\n eligible_spending = {'eligible_spending':[]}\n for key in dict_of_spending_checkbox_info:\n val = dict_of_spending_checkbox_info[key]\n if val == 1:\n eligible_spending['eligible_spending'].append('_'.join(key.split(\"_\")[0:-1]))\n return eligible_spending\n\n\ndef return_cashback_value(dict_of_spending_amounts_info, dict_of_preferred_credit_card_spending_rewards_info, contactless_CF, debug=False):\n \n cashback_value = 0\n \n dictt = dict_of_spending_amounts_info\n row = dict_of_preferred_credit_card_spending_rewards_info\n total_spending = sum([dict_of_spending_amounts_info[key] for key in dict_of_spending_amounts_info.keys()])\n multiple_levels = row['multiple_levels'][0]\n overall_cashback_min_spend = row['overall_cashback_min_spend']\n if debug:\n print(dict_of_spending_amounts_info)\n print(dict_of_preferred_credit_card_spending_rewards_info)\n print(\"Overall Cashback Min Spend\", overall_cashback_min_spend)\n print(\"Multiple Levels\", multiple_levels)\n \n ## Get index if there are multiple levels ##\n if multiple_levels == 0:\n index = 0\n if total_spending < overall_cashback_min_spend[0]:\n print(\"Did not hit the overall minimum spending, no cashback given\")\n cashback_value = 0\n return cashback_value\n elif multiple_levels == 1:\n if total_spending < overall_cashback_min_spend[0]:\n print(\"Did not hit the overall minimum spending, no cashback given\")\n cashback_value = 0\n return cashback_value\n for i in range(len(overall_cashback_min_spend)):\n if total_spending >= overall_cashback_min_spend[i]:\n index = i\n else:\n print(\"ERROR\")\n \n cardid = str(row['credit_card_id'][0])\n credit_card_name = str(row['credit_card_name'][0])\n overall_cashback_cap = row['overall_cashback_cap'][index]\n contactless_cashback_rate = row['contactless_cashback_rate'][index]\n contactless_cashback_cap = row['contactless_cashback_cap'][index]\n contactless_cashback_min = row['contactless_cashback_min_spend'][index]\n bill_cashback_rate = row['bill_cashback_rate'][index]\n bill_cashback_cap = row['bill_cashback_cap'][index]\n bill_cashback_min_spend = row['bill_cashback_min_spend'][index]\n dining_cashback_rate = row['dining_cashback_rate'][index]\n dining_cashback_cap = row['dining_cashback_cap'][index]\n dining_cashback_min_spend = row['dining_cashback_min_spend'][index]\n foreign_cashback_rate = row['foreign_cashback_rate'][index]\n foreign_cashback_cap = row['foreign_cashback_cap'][index]\n foreign_cashback_min_spend = row['foreign_cashback_min_spend'][index]\n retail_shopping_cashback_rate = row['retail_shopping_cashback_rate'][index]\n retail_shopping_cashback_cap = row['retail_shopping_cashback_cap'][index]\n retail_shopping_cashback_min_spend = row['retail_shopping_cashback_min_spend'][index]\n transport_cashback_rate = row['transport_cashback_rate'][index]\n transport_cashback_cap = row['transport_cashback_cap'][index]\n transport_cashback_min_spend = row['transport_cashback_min_spend'][index]\n groceries_overall_cashback_cap = row['groceries_overall_cashback_cap'][index]\n groceries_others_cashback_rate = row['groceries_others_cashback_rate'][index]\n groceries_others_cashback_cap = row['groceries_others_cashback_cap'][index]\n groceries_others_cashback_min_spend = row['groceries_others_cashback_min_spend'][index]\n groceries_ntuc_cashback_rate = row['groceries_ntuc_cashback_rate'][index]\n groceries_ntuc_cashback_cap = row['groceries_ntuc_cashback_cap'][index]\n groceries_ntuc_cashback_min_spend = row['groceries_ntuc_cashback_min_spend'][index]\n groceries_sheng_siong_cashback_rate = row['groceries_sheng_siong_cashback_rate'][index]\n groceries_sheng_siong_cashback_cap = row['groceries_sheng_siong_cashback_cap'][index]\n groceries_sheng_siong_cashback_min_spend = row['groceries_sheng_siong_cashback_min_spend'][index]\n groceries_cold_storage_cashback_rate = row['groceries_cold_storage_cashback_rate'][index]\n groceries_cold_storage_cashback_cap = row['groceries_cold_storage_cashback_cap'][index]\n groceries_cold_storage_cashback_min_spend = row['groceries_cold_storage_cashback_min_spend'][index]\n groceries_giant_cashback_rate = row['groceries_giant_cashback_rate'][index]\n groceries_giant_cashback_cap = row['groceries_giant_cashback_cap'][index]\n groceries_giant_cashback_min_spend = row['groceries_giant_cashback_min_spend'][index]\n online_shopping_overall_cashback_cap = row['online_shopping_overall_cashback_cap'][index]\n online_shopping_others_cashback_rate = row['online_shopping_others_cashback_rate'][index]\n online_shopping_others_cashback_cap = row['online_shopping_others_cashback_cap'][index]\n online_shopping_others_cashback_min_spend = row['online_shopping_others_cashback_min_spend'][index]\n online_shopping_hotels_and_flights_cashback_rate = row['online_shopping_hotels_and_flights_cashback_rate'][index]\n online_shopping_hotels_and_flights_cashback_cap = row['online_shopping_hotels_and_flights_cashback_cap'][index]\n online_shopping_hotels_and_flights_cashback_min_spend = row['online_shopping_hotels_and_flights_cashback_min_spend'][index]\n petrol_overal_cashback_cap = row['petrol_overal_cashback_cap'][index]\n petrol_others_cashback_rate = row['petrol_others_cashback_rate'][index]\n petrol_others_cashback_cap = row['petrol_others_cashback_cap'][index]\n petrol_others_cashback_min_spend = row['petrol_others_cashback_min_spend'][index]\n petrol_esso_cashback_rate = row['petrol_esso_cashback_rate'][index]\n petrol_esso_cashback_cap = row['petrol_esso_cashback_cap'][index]\n petrol_esso_cashback_min_spend = row['petrol_esso_cashback_min_spend'][index]\n petrol_caltex_cashback_rate = row['petrol_caltex_cashback_rate'][index]\n petrol_caltex_cashback_cap = row['petrol_caltex_cashback_cap'][index]\n petrol_caltex_cashback_min_spend = row['petrol_caltex_cashback_min_spend'][index]\n petrol_shell_cashback_rate = row['petrol_shell_cashback_rate'][index]\n petrol_shell_cashback_cap = row['petrol_shell_cashback_cap'][index]\n petrol_shell_cashback_min_spend = row['petrol_shell_cashback_min_spend'][index]\n \n bill_spending_ = dictt['bill_spending']\n dining_spending_ = dictt['dining_spending']\n foreign_spending_ = dictt['foreign_spending']\n retail_shopping_spending_ = dictt['retail_shopping_spending']\n transport_spending_ = dictt['transport_spending']\n groceries_others_spending_ = dictt['groceries_others_spending']\n groceries_ntuc_spending_ = dictt['groceries_ntuc_spending']\n groceries_sheng_siong_spending_ = dictt['groceries_sheng_siong_spending']\n groceries_cold_storage_spending_ = dictt['groceries_cold_storage_spending']\n groceries_giant_spending_ = dictt['groceries_giant_spending']\n online_shopping_others_spending_ = dictt['online_shopping_others_spending']\n online_shopping_hotels_and_flight_spending_ = dictt['online_shopping_hotels_and_flight_spending']\n petrol_others_spending_ = dictt['petrol_others_spending']\n petrol_esso_spending_ = dictt['petrol_esso_spending']\n petrol_shell_spending_ = dictt['petrol_shell_spending']\n petrol_caltex_spending_ = dictt['petrol_caltex_spending']\n\n class SpendingInput(Fact):\n bill_spending = Field(float)\n dining_spending = Field(float)\n foreign_spending = Field(float)\n retail_shopping_spending = Field(float)\n transport_spending = Field(float)\n groceries_others_spending = Field(float)\n groceries_ntuc_spending = Field(float)\n groceries_sheng_siong_spending = Field(float)\n groceries_cold_storage_spending = Field(float)\n groceries_giant_spending = Field(float)\n online_shopping_others_spending = Field(float)\n online_shopping_hotels_and_flight_spending = Field(float)\n petrol_others_spending = Field(float)\n petrol_esso_spending = Field(float)\n petrol_shell_spending = Field(float)\n petrol_caltex_spending = Field(float)\n pass \n \n class Cashback(KnowledgeEngine):\n def __init__(self, *args, **kwargs):\n super().__init__(*args, **kwargs)\n self.cashback = 0\n self.bill_cashback = 0\n self.cashback_dining = 0\n self.cashback_foreign = 0\n self.cashback_foreign = 0\n self.cashback_retail = 0\n self.cashback_transport = 0\n self.cashback_groceries_others = 0\n self.cashback_groceries_ntuc = 0\n self.cashback_groceries_sheng_siong = 0\n self.cashback_groceries_cold_storage = 0\n self.cashback_groceries_giant = 0\n self.cashback_online_others = 0\n self.cashback_online_hotels_flights = 0\n self.cashback_petrol_others = 0\n self.cashback_petrol_esso = 0\n self.cashback_petrol_shell = 0\n self.cashback_petrol_caltex = 0\n \n #Bill Spending Cashback\n @Rule(AND(AS.v << SpendingInput(data__bill_spending = MATCH.data__bill_spending),\n TEST(lambda data__bill_spending: data__bill_spending >= bill_cashback_min_spend),\n AS.v << SpendingInput(data__bill_spending = MATCH.data__bill_spending),\n TEST(lambda data__bill_spending: data__bill_spending * bill_cashback_rate <= bill_cashback_cap)))\n def cashback_bill_1(self, v):\n self.bill_cashback = bill_spending_*bill_cashback_rate\n print(\"bill cashback within cap amount, %f earned\" %(self.bill_cashback))\n @Rule(AND(AS.v << SpendingInput(data__bill_spending = MATCH.data__bill_spending),\n TEST(lambda data__bill_spending: data__bill_spending >= bill_cashback_min_spend),\n AS.v << SpendingInput(data__bill_spending = MATCH.data__bill_spending),\n TEST(lambda data__bill_spending: data__bill_spending * bill_cashback_rate > bill_cashback_cap)))\n def cashback_bill_2(self, v):\n self.bill_cashback = bill_cashback_cap\n print(\"bill cashback exceed cap amount, %f earned\" %(self.bill_cashback))\n \n #Dining Spending Cashback\n @Rule(AND(AS.v << SpendingInput(data__dining_spending = MATCH.data__dining_spending),\n TEST(lambda data__dining_spending: data__dining_spending >= dining_cashback_min_spend),\n AS.v << SpendingInput(data__dining_spending = MATCH.data__dining_spending),\n TEST(lambda data__dining_spending: data__dining_spending * dining_cashback_rate <= dining_cashback_cap)))\n def cashback_dining_1(self, v):\n self.cashback_dining = dining_spending_*dining_cashback_rate\n print(\"dining cashback within cap amount, %f earned\" %(self.cashback_dining))\n @Rule(AND(AS.v << SpendingInput(data__dining_spending = MATCH.data__dining_spending),\n TEST(lambda data__dining_spending: data__dining_spending >= dining_cashback_min_spend),\n AS.v << SpendingInput(data__dining_spending = MATCH.data__dining_spending),\n TEST(lambda data__dining_spending: data__dining_spending * dining_cashback_rate > dining_cashback_cap)))\n def cashback_dining_2(self, v):\n self.cashback_dining = dining_cashback_cap\n print(\"dining cashback exceed cap amount, %f earned\" %(self.cashback_dining))\n \n #foreign spending cashback\n @Rule(AND(AS.v << SpendingInput(data__foreign_spending = MATCH.data__foreign_spending),\n TEST(lambda data__foreign_spending: data__foreign_spending >= foreign_cashback_min_spend),\n AS.v << SpendingInput(data__foreign_spending = MATCH.data__foreign_spending),\n TEST(lambda data__foreign_spending: data__foreign_spending * foreign_cashback_rate <= foreign_cashback_cap)))\n def cashback_foreign_1(self, v):\n self.cashback_foreign = foreign_spending_*foreign_cashback_rate\n print(\"foreign cashback within cap amount, %f earned\" %(self.cashback_foreign))\n @Rule(AND(AS.v << SpendingInput(data__foreign_spending = MATCH.data__foreign_spending),\n TEST(lambda data__foreign_spending: data__foreign_spending >= foreign_cashback_min_spend),\n AS.v << SpendingInput(data__foreign_spending = MATCH.data__foreign_spending),\n TEST(lambda data__foreign_spending: data__foreign_spending * foreign_cashback_rate > foreign_cashback_cap)))\n def cashback_foreign_2(self, v):\n self.cashback_foreign = foreign_cashback_cap\n print(\"foreign cashback exceed cap amount, %f earned\" %(self.cashback_foreign))\n\n #retail spending cashback\n @Rule(AND( AS.v << SpendingInput(data__retail_spending = MATCH.data__retail_spending),\n TEST(lambda data__retail_spending: data__retail_spending >= retail_cashback_min_spend),\n AS.v << SpendingInput(data__retail_spending = MATCH.data__retail_spending),\n TEST(lambda data__retail_spending: data__retail_spending * retail_cashback_rate <= retail_cashback_cap)))\n def cashback_retail_1(self, v):\n self.cashback_retail = retail_spending_*retail_cashback_rate\n print(\"retail cashback within cap amount, %f earned\" %(self.cashback_retail))\n @Rule(AND(AS.v << SpendingInput(data__retail_spending = MATCH.cashback_retail),\n TEST(lambda data__retail_spending: data__retail_spending >= retail_cashback_min_spend),\n AS.v << SpendingInput(data__retail_spending = MATCH.data__retail_spending),\n TEST(lambda data__retail_spending: data__retail_spending * retail_cashback_rate > retail_cashback_cap)))\n def cashback_retail_2(self, v):\n self.cashback_retail = retail_cashback_cap\n print(\"retail cashback exceed cap amount, %f earned\" %(self.cashback_retail))\n\n #transport spending cashback\n @Rule(AND(AS.v << SpendingInput(data__transport_spending = MATCH.data__transport_spending),\n TEST(lambda data__transport_spending: data__transport_spending >= transport_cashback_min_spend),\n AS.v << SpendingInput(data__transport_spending = MATCH.data__transport_spending),\n TEST(lambda data__transport_spending: data__transport_spending * transport_cashback_rate <= transport_cashback_cap)))\n def cashback_transport_1(self, v):\n self.cashback_transport = transport_spending_*transport_cashback_rate\n print(\"transport cashback within cap amount, %f earned\" %(self.cashback_transport))\n @Rule(AND(AS.v << SpendingInput(data__transport_spending = MATCH.data__transport_spending),\n TEST(lambda data__transport_spending: data__transport_spending >= transport_cashback_min_spend),\n AS.v << SpendingInput(data__transport_spending = MATCH.data__transport_spending),\n TEST(lambda data__transport_spending: data__transport_spending * transport_cashback_rate > transport_cashback_cap)))\n def cashback_transport_2(self, v):\n self.cashback_transport = transport_cashback_cap\n print(\"transport cashback exceed cap amount, %f earned\" %(self.cashback_transport))\n \n #groceries_others spending cashback\n @Rule(AND(AS.v << SpendingInput(data__groceries_others_spending = MATCH.data__groceries_others_spending),\n TEST(lambda data__groceries_others_spending: data__groceries_others_spending >= groceries_others_cashback_min_spend),\n AS.v << SpendingInput(data__groceries_others_spending = MATCH.data__groceries_others_spending),\n TEST(lambda data__groceries_others_spending: data__groceries_others_spending * groceries_others_cashback_rate <= groceries_others_cashback_cap)))\n def groceries_others_1(self, v):\n self.cashback_groceries_others = groceries_others_spending_*groceries_others_cashback_rate\n print(\"groceries_others cashback within cap amount, %f earned\" %(self.cashback_groceries_others))\n @Rule(AND(AS.v << SpendingInput(data__groceries_others_spending = MATCH.data__groceries_others_spending),\n TEST(lambda data__groceries_others_spending: data__groceries_others_spending >= groceries_others_cashback_min_spend),\n AS.v << SpendingInput(data__groceries_others_spending = MATCH.data__groceries_others_spending),\n TEST(lambda data__groceries_others_spending: data__groceries_others_spending * groceries_others_cashback_rate > groceries_others_cashback_cap)))\n def groceries_others_2(self, v):\n self.cashback_groceries_others = groceries_others_cashback_cap\n print(\"groceries_others cashback exceed cap amount, %f earned\" %(self.cashback_groceries_others))\n \n #groceries_ntuc spending cashback\n @Rule(AND(AS.v << SpendingInput(data__groceries_ntuc_spending = MATCH.data__groceries_ntuc_spending),\n TEST(lambda data__groceries_ntuc_spending: data__groceries_ntuc_spending >= groceries_ntuc_cashback_min_spend),\n AS.v << SpendingInput(data__groceries_ntuc_spending = MATCH.data__groceries_ntuc_spending),\n TEST(lambda data__groceries_ntuc_spending: data__groceries_ntuc_spending * groceries_ntuc_cashback_rate <= groceries_ntuc_cashback_cap)))\n def groceries_ntuc_1(self, v):\n self.cashback_groceries_ntuc = groceries_ntuc_spending_*groceries_ntuc_cashback_rate\n print(\"groceries_ntuc cashback within cap amount, %f earned\" %(self.cashback_groceries_ntuc))\n @Rule(AND(AS.v << SpendingInput(data__groceries_ntuc_spending = MATCH.data__groceries_ntuc_spending),\n TEST(lambda data__groceries_ntuc_spending: data__groceries_ntuc_spending >= groceries_ntuc_cashback_min_spend),\n AS.v << SpendingInput(data__groceries_ntuc_spending = MATCH.data__groceries_ntuc_spending),\n TEST(lambda data__groceries_ntuc_spending: data__groceries_ntuc_spending * groceries_ntuc_cashback_rate > groceries_ntuc_cashback_cap)))\n def groceries_ntuc_2(self, v):\n self.cashback_groceries_ntuc = groceries_ntuc_cashback_cap\n print(\"groceries_ntuc cashback exceed cap amount, %f earned\" %(self.cashback_groceries_ntuc))\n\n #groceries_sheng_siong spending cashback\n @Rule(AND(AS.v << SpendingInput(data__groceries_sheng_siong_spending = MATCH.data__groceries_sheng_siong_spending),\n TEST(lambda data__groceries_sheng_siong_spending: data__groceries_sheng_siong_spending >= groceries_sheng_siong_cashback_min_spend),\n AS.v << SpendingInput(data__groceries_sheng_siong_spending = MATCH.data__groceries_sheng_siong_spending),\n TEST(lambda data__groceries_sheng_siong_spending: data__groceries_sheng_siong_spending * groceries_sheng_siong_cashback_rate <= groceries_sheng_siong_cashback_cap)))\n def groceries_sheng_siong_1(self, v):\n self.cashback_groceries_sheng_siong = groceries_sheng_siong_spending_*groceries_sheng_siong_cashback_rate\n print(\"groceries_sheng_siong cashback within cap amount, %f earned\" %(self.cashback_groceries_sheng_siong))\n @Rule(AND(AS.v << SpendingInput(data__groceries_sheng_siong_spending = MATCH.data__groceries_sheng_siong_spending),\n TEST(lambda data__groceries_sheng_siong_spending: data__groceries_sheng_siong_spending >= groceries_sheng_siong_cashback_min_spend),\n AS.v << SpendingInput(data__groceries_sheng_siong_spending = MATCH.data__groceries_sheng_siong_spending),\n TEST(lambda data__groceries_sheng_siong_spending: data__groceries_sheng_siong_spending * groceries_sheng_siong_cashback_rate > groceries_sheng_siong_cashback_cap)))\n def groceries_sheng_siong_2(self, v):\n self.cashback_groceries_sheng_siong = groceries_sheng_siong_cashback_cap\n print(\"groceries_sheng_siong cashback exceed cap amount, %f earned\" %(self.cashback_groceries_sheng_siong))\n\n #groceries_cold_storage spending cashback\n @Rule(AND(AS.v << SpendingInput(data__groceries_cold_storage_spending = MATCH.data__groceries_cold_storage_spending),\n TEST(lambda data__groceries_cold_storage_spending: data__groceries_cold_storage_spending >= groceries_cold_storage_cashback_min_spend),\n AS.v << SpendingInput(data__groceries_cold_storage_spending = MATCH.data__groceries_cold_storage_spending),\n TEST(lambda data__groceries_cold_storage_spending: data__groceries_cold_storage_spending * groceries_cold_storage_cashback_rate <= groceries_cold_storage_cashback_cap)))\n def groceries_cold_storage_1(self, v):\n self.cashback_groceries_cold_storage = groceries_cold_storage_spending_*groceries_cold_storage_cashback_rate\n print(\"groceries_cold_storage cashback within cap amount, %f earned\" %(self.cashback_groceries_cold_storage))\n @Rule(AND(AS.v << SpendingInput(data__groceries_cold_storage_spending = MATCH.data__groceries_cold_storage_spending),\n TEST(lambda data__groceries_cold_storage_spending: data__groceries_cold_storage_spending >= groceries_cold_storage_cashback_min_spend),\n AS.v << SpendingInput(data__groceries_cold_storage_spending = MATCH.data__groceries_cold_storage_spending),\n TEST(lambda data__groceries_cold_storage_spending: data__groceries_cold_storage_spending * groceries_cold_storage_cashback_rate > groceries_cold_storage_cashback_cap)))\n def groceries_cold_storage_2(self, v):\n self.cashback_groceries_cold_storage = groceries_cold_storage_cashback_cap\n print(\"groceries_cold_storage cashback exceed cap amount, %f earned\" %(self.cashback_groceries_cold_storage))\n\n\n #groceries_giant spending cashback\n @Rule(AND(AS.v << SpendingInput(data__groceries_giant_spending = MATCH.data__groceries_giant_spending),\n TEST(lambda data__groceries_giant_spending: data__groceries_giant_spending >= groceries_giant_cashback_min_spend),\n AS.v << SpendingInput(data__groceries_giant_spending = MATCH.data__groceries_giant_spending),\n TEST(lambda data__groceries_giant_spending: data__groceries_giant_spending * groceries_giant_cashback_rate <= groceries_giant_cashback_cap)))\n def groceries_giant_1(self, v):\n self.cashback_groceries_giant = groceries_giant_spending_*groceries_giant_cashback_rate\n print(\"groceries_giant cashback within cap amount, %f earned\" %(self.cashback_groceries_giant))\n @Rule(AND(AS.v << SpendingInput(data__groceries_giant_spending = MATCH.data__groceries_giant_spending),\n TEST(lambda data__groceries_giant_spending: data__groceries_giant_spending >= groceries_giant_cashback_min_spend),\n AS.v << SpendingInput(data__groceries_giant_spending = MATCH.data__groceries_giant_spending),\n TEST(lambda data__groceries_giant_spending: data__groceries_giant_spending * groceries_giant_cashback_rate > groceries_giant_cashback_cap)))\n def groceries_giant_2(self, v):\n self.cashback_groceries_giant = groceries_giant_cashback_cap\n print(\"groceries_giant cashback exceed cap amount, %f earned\" %(self.cashback_groceries_giant))\n \n #online_others spending cashback\n @Rule(AND(AS.v << SpendingInput(data__online_shopping_others_spending = MATCH.data__online_shopping_others_spending),\n TEST(lambda data__online_shopping_others_spending: data__online_shopping_others_spending >= online_shopping_others_cashback_min_spend),\n AS.v << SpendingInput(data__online_shopping_others_spending = MATCH.data__online_shopping_others_spending),\n TEST(lambda data__online_shopping_others_spending: data__online_shopping_others_spending * online_shopping_others_cashback_rate <= online_shopping_others_cashback_cap)))\n def online_others_1(self, v):\n self.cashback_online_others = online_shopping_others_spending_*online_shopping_others_cashback_rate\n print(\"online_others cashback within cap amount, %f earned\" %(self.cashback_online_others))\n @Rule(AND(AS.v << SpendingInput(data__online_shopping_others_spending = MATCH.data__online_shopping_others_spending),\n TEST(lambda data__online_shopping_others_spending: data__online_shopping_others_spending >= online_shopping_others_cashback_min_spend),\n AS.v << SpendingInput(data__online_shopping_others_spending = MATCH.data__online_shopping_others_spending),\n TEST(lambda data__online_shopping_others_spending: data__online_shopping_others_spending * online_shopping_others_cashback_rate > online_shopping_others_cashback_cap)))\n def online_others_2(self, v):\n self.cashback_online_others = online_shopping_others_cashback_cap\n print(\"online_others cashback exceed cap amount, %f earned\" %(self.cashback_online_others))\n\n #online_hotels_flights spending cashback\n @Rule(AND(AS.v << SpendingInput(data__online_shopping_hotels_and_flight_spending = MATCH.data__online_shopping_hotels_and_flight_spending),\n TEST(lambda data__online_shopping_hotels_and_flight_spending: data__online_shopping_hotels_and_flight_spending >= online_shopping_hotels_and_flights_cashback_min_spend),\n AS.v << SpendingInput(data__online_shopping_hotels_and_flight_spending = MATCH.data__online_shopping_hotels_and_flight_spending),\n TEST(lambda data__online_shopping_hotels_and_flight_spending: data__online_shopping_hotels_and_flight_spending * online_shopping_hotels_and_flights_cashback_rate <= online_shopping_hotels_and_flights_cashback_cap)))\n def online_hotels_flights_1(self, v):\n self.cashback_online_hotels_flights = online_shopping_hotels_and_flight_spending_*online_shopping_hotels_and_flights_cashback_rate\n print(\"online_hotels_flights cashback within cap amount, %f earned\" %(self.cashback_online_hotels_flights))\n @Rule(AND(AS.v << SpendingInput(data__online_shopping_hotels_and_flight_spending = MATCH.data__online_shopping_hotels_and_flight_spending),\n TEST(lambda data__online_shopping_hotels_and_flight_spending: data__online_shopping_hotels_and_flight_spending >= online_shopping_hotels_and_flights_cashback_min_spend),\n AS.v << SpendingInput(data__online_shopping_hotels_and_flight_spending = MATCH.data__online_shopping_hotels_and_flight_spending),\n TEST(lambda data__online_shopping_hotels_and_flight_spending: data__online_shopping_hotels_and_flight_spending * online_shopping_hotels_and_flights_cashback_rate > online_shopping_hotels_and_flights_cashback_cap)))\n def online_hotels_flights_2(self, v):\n self.cashback_online_hotels_flights = online_shopping_others_cashback_cap\n print(\"online_hotels_flights cashback exceed cap amount, %f earned\" %(self.cashback_online_hotels_flights))\n \n \n #petrol_others spending cashback\n @Rule(AND(AS.v << SpendingInput(data__petrol_others_spending = MATCH.data__petrol_others_spending),\n TEST(lambda data__petrol_others_spending: data__petrol_others_spending >= petrol_others_cashback_min_spend),\n AS.v << SpendingInput(data__petrol_others_spending = MATCH.data__petrol_others_spending),\n TEST(lambda data__petrol_others_spending: data__petrol_others_spending * petrol_others_cashback_rate <= petrol_others_cashback_cap)))\n def petrol_others_1(self, v):\n self.cashback_petrol_others = petrol_others_spending_*petrol_others_cashback_rate\n print(\"petrol_others cashback within cap amount, %f earned\" %(self.cashback_petrol_others))\n @Rule(AND(AS.v << SpendingInput(data__petrol_others_spending = MATCH.data__petrol_others_spending),\n TEST(lambda data__petrol_others_spending: data__petrol_others_spending >= petrol_others_cashback_min_spend),\n AS.v << SpendingInput(data__petrol_others_spending = MATCH.data__petrol_others_spending),\n TEST(lambda data__petrol_others_spending: data__petrol_others_spending * petrol_others_cashback_rate > petrol_others_cashback_cap)))\n def petrol_others_2(self, v):\n self.cashback_petrol_others = petrol_others_cashback_cap\n print(\"petrol_others cashback exceed cap amount, %f earned\" %(self.cashback_petrol_others))\n\n #petrol_esso spending cashback\n @Rule(AND(AS.v << SpendingInput(data__petrol_esso_spending = MATCH.data__petrol_esso_spending),\n TEST(lambda data__petrol_esso_spending: data__petrol_esso_spending >= petrol_esso_cashback_min_spend),\n AS.v << SpendingInput(data__petrol_esso_spending = MATCH.data__petrol_esso_spending),\n TEST(lambda data__petrol_esso_spending: data__petrol_esso_spending * petrol_esso_cashback_rate <= petrol_esso_cashback_cap)))\n def petrol_esso_1(self, v):\n self.cashback_petrol_esso = petrol_esso_spending_*petrol_esso_cashback_rate\n print(\"petrol_esso cashback within cap amount, %f earned\" %(self.cashback_petrol_esso))\n @Rule(AND(AS.v << SpendingInput(data__petrol_esso_spending = MATCH.data__petrol_esso_spending),\n TEST(lambda data__petrol_esso_spending: data__petrol_esso_spending >= petrol_esso_cashback_min_spend),\n AS.v << SpendingInput(data__petrol_esso_spending = MATCH.data__petrol_esso_spending),\n TEST(lambda data__petrol_esso_spending: data__petrol_esso_spending * petrol_esso_cashback_rate > petrol_esso_cashback_cap)))\n def petrol_esso_2(self, v):\n self.cashback_petrol_esso = petrol_esso_cashback_cap\n print(\"petrol_esso cashback exceed cap amount, %f earned\" %(self.cashback_petrol_esso))\n\n #petrol_shell spending cashback\n @Rule(AND(AS.v << SpendingInput(data__petrol_shell_spending = MATCH.data__petrol_shell_spending),\n TEST(lambda data__petrol_shell_spending: data__petrol_shell_spending >= petrol_shell_cashback_min_spend),\n AS.v << SpendingInput(data__petrol_shell_spending = MATCH.data__petrol_shell_spending),\n TEST(lambda data__petrol_shell_spending: data__petrol_shell_spending * petrol_shell_cashback_rate <= petrol_shell_cashback_cap)))\n def petrol_shell_1(self, v):\n self.cashback_petrol_shell = petrol_shell_spending_*petrol_shell_cashback_rate\n print(\"petrol_shell cashback within cap amount, %f earned\" %(self.cashback_petrol_shell))\n @Rule(AND(AS.v << SpendingInput(data__petrol_shell_spending = MATCH.data__petrol_shell_spending),\n TEST(lambda data__petrol_shell_spending: data__petrol_shell_spending >= petrol_shell_cashback_min_spend),\n AS.v << SpendingInput(data__petrol_shell_spending = MATCH.data__petrol_shell_spending),\n TEST(lambda data__petrol_shell_spending: data__petrol_shell_spending * petrol_shell_cashback_rate > petrol_shell_cashback_cap)))\n def petrol_shell_2(self, v):\n self.cashback_petrol_shell = petrol_shell_cashback_cap\n print(\"petrol_shell cashback exceed cap amount, %f earned\" %(self.cashback_petrol_shell))\n\n #petrol_caltex spending cashback\n @Rule(AND(AS.v << SpendingInput(data__petrol_caltex_spending = MATCH.data__petrol_caltex_spending),\n TEST(lambda data__petrol_caltex_spending: data__petrol_caltex_spending >= petrol_caltex_cashback_min_spend),\n AS.v << SpendingInput(data__petrol_caltex_spending = MATCH.data__petrol_caltex_spending),\n TEST(lambda data__petrol_caltex_spending: petrol_caltex_spending_*petrol_caltex_cashback_rate <= petrol_caltex_cashback_cap)))\n def petrol_caltex_1(self, v):\n self.cashback_petrol_caltex = petrol_caltex_spending_*petrol_caltex_cashback_rate\n print(\"petrol_caltex cashback within cap amount, %f earned\" %(self.cashback_petrol_caltex))\n @Rule(AND(AS.v << SpendingInput(data__petrol_caltex_spending = MATCH.data__petrol_caltex_spending),\n TEST(lambda data__petrol_caltex_spending: data__petrol_caltex_spending >= petrol_caltex_cashback_min_spend),\n AS.v << SpendingInput(data__petrol_caltex_spending = MATCH.data__petrol_caltex_spending),\n TEST(lambda data__petrol_caltex_spending: petrol_caltex_spending_*petrol_caltex_cashback_rate > petrol_caltex_cashback_cap)))\n def petrol_caltex_2(self, v):\n self.cashback_petrol_caltex = petrol_caltex_cashback_cap\n print(\"petrol_caltex cashback exceed cap amount, %f earned\" %(self.cashback_petrol_caltex))\n \n engine=Cashback()\n engine.reset()\n engine.declare(SpendingInput(data=dict_of_spending_amounts_info))\n engine.run()\n engine.facts\n\n ##check for groceries overall\n groceries_overall = engine.cashback_groceries_others + engine.cashback_groceries_ntuc + engine.cashback_groceries_sheng_siong + engine.cashback_groceries_cold_storage + engine.cashback_groceries_giant\n if groceries_overall > groceries_overall_cashback_cap:\n groceries_overall = groceries_overall_cashback_cap\n\n ##check for online overall\n online_overall = engine.cashback_online_others + engine.cashback_online_hotels_flights\n if online_overall > online_shopping_overall_cashback_cap:\n online_overall = online_shopping_overall_cashback_cap\n\n ##check for petrol overall\n petrol_overal = engine.cashback_petrol_others + engine.cashback_petrol_esso + engine.cashback_petrol_shell + engine.cashback_petrol_caltex\n if petrol_overal > petrol_overal_cashback_cap:\n petrol_overal = petrol_overal_cashback_cap\n\n ##check contactless overall\n cashback_overall = groceries_overall + online_overall + petrol_overal + engine.bill_cashback + engine.cashback_dining + engine.cashback_foreign + engine.cashback_retail + engine.cashback_transport\n \n contactless_cashback = 0\n if cashback_overall >= contactless_cashback_min:\n potential_contactless_cashback = total_spending * contactless_CF * contactless_cashback_rate\n if potential_contactless_cashback <= contactless_cashback_cap:\n contactless_cashback = potential_contactless_cashback\n print(\"contactless cashback within cap amount, %f earned\" %(contactless_cashback))\n else:\n contactless_cashback = contactless_cashback_cap\n print(\"contactless cashback exceed cap amount, %f earned\" %(contactless_cashback))\n\n ##cashback overall amount\n cashback_value = cashback_overall + contactless_cashback\n \n return min(cashback_value, overall_cashback_cap)\n\n\ndef return_reward_value(dict_of_spending_amounts_info, dict_of_preferred_credit_card_spending_rewards_info, contactless_CF, debug=False):\n \n reward_value = 0\n \n dictt = dict_of_spending_amounts_info\n row = dict_of_preferred_credit_card_spending_rewards_info\n \n if debug:\n print(dict_of_spending_amounts_info)\n print(dict_of_preferred_credit_card_spending_rewards_info)\n \n overall_points_min_spend = row['overall_points_min_spend'][0]\n total_spending = sum([dict_of_spending_amounts_info[key] for key in dict_of_spending_amounts_info.keys()])\n\n if total_spending < overall_points_min_spend:\n print(\"Did not hit the overall minimum spending, no points given\")\n reward_value = 0\n return reward_value\n \n cardid = str(row['credit_card_id'][0])\n credit_card_name = str(row['credit_card_name'][0])\n overall_points_cap = row['overall_points_cap'][0]\n contactless_points_multiplier = row['contactless_points_multiplier'][0]\n contactless_points_cap = row['contactless_points_cap'][0]\n contactless_points_lot = row['contactless_points_lot'][0]\n dining_points_multiplier = row['dining_points_multiplier'][0]\n dining_points_cap = row['dining_points_cap'][0]\n dining_points_lot = row['dining_points_lot'][0]\n entertainment_points_multiplier = row['entertainment_points_multiplier'][0]\n entertainment_points_cap = row['entertainment_points_cap'][0]\n entertainment_points_lot = row['entertainment_points_lot'][0]\n foreign_points_multiplier = row['foreign_points_multiplier'][0]\n foreign_points_cap = row['foreign_points_cap'][0]\n foreign_points_lot = row['foreign_points_lot'][0]\n online_shopping_others_points_multiplier = row['online_shopping_others_points_multiplier'][0]\n online_shopping_others_points_cap = row['online_shopping_others_points_cap'][0]\n online_shopping_others_points_lot = row['online_shopping_others_points_lot'][0]\n online_shopping_hotels_and_flight_points_multiplier = row['online_shopping_hotels_and_flight_points_multiplier'][0]\n online_shopping_hotels_and_flights_points_cap = row['online_shopping_hotels_and_flights_points_cap'][0]\n online_shopping_hotels_and_flights_points_lot = row['online_shopping_hotels_and_flights_points_lot'][0]\n retail_shopping_points_multiplier = row['retail_shopping_points_multiplier'][0]\n retail_shopping_points_cap = row['retail_shopping_points_cap'][0]\n retail_shopping_points_lot = row['retail_shopping_points_lot'][0]\n points_to_miles_conversion = row['points_to_miles_conversion'][0]\n \n bill_spending_ = dictt['bill_spending']\n dining_spending_ = dictt['dining_spending']\n entertainment_spending_ = dictt['entertainment_spending']\n foreign_spending_ = dictt['foreign_spending']\n retail_shopping_spending_ = dictt['retail_shopping_spending']\n transport_spending_ = dictt['transport_spending']\n groceries_others_spending_ = dictt['groceries_others_spending']\n groceries_ntuc_spending_ = dictt['groceries_ntuc_spending']\n groceries_sheng_siong_spending_ = dictt['groceries_sheng_siong_spending']\n groceries_cold_storage_spending_ = dictt['groceries_cold_storage_spending']\n groceries_giant_spending_ = dictt['groceries_giant_spending']\n online_shopping_others_spending_ = dictt['online_shopping_others_spending']\n online_shopping_hotels_and_flight_spending_ = dictt['online_shopping_hotels_and_flight_spending']\n petrol_others_spending_ = dictt['petrol_others_spending']\n petrol_esso_spending_ = dictt['petrol_esso_spending']\n petrol_caltex_spending_ = dictt['petrol_caltex_spending']\n petrol_shell_spending_ = dictt['petrol_shell_spending']\n \n class SpendingInput(Fact): \n bill_spending = Field(float)\n dining_spending = Field(float)\n foreign_spending = Field(float)\n retail_shopping_spending = Field(float)\n transport_spending = Field(float)\n groceries_others_spending = Field(int)\n groceries_ntuc_spending = Field(int)\n groceries_sheng_siong_spending = Field(int)\n groceries_cold_storage_spending = Field(int)\n groceries_giant_spending = Field(int)\n online_shopping_others_spending = Field(int)\n online_shopping_hotels_and_flight_spending = Field(int)\n petrol_others_spending = Field(int)\n petrol_esso_spending = Field(int)\n petrol_caltex_spending = Field(int)\n petrol_shell_spending = Field(int)\n pass \n\n class Reward(KnowledgeEngine):\n def __init__(self, *args, **kwargs):\n super().__init__(*args, **kwargs)\n self.reward_points = 0\n self.reward_dining = 0\n\n # Reward for dining\n @Rule(AS.v << SpendingInput(data__dining_spending = MATCH.data__dining_spending),\n TEST(lambda data__dining_spending: data__dining_spending >= 0))\n def reward_dining(self):\n self.reward_dining = dining_spending_ // dining_points_lot * dining_points_multiplier\n print(\"%f points were earned from dining\" %(self.reward_dining))\n \n # Reward for entertainment\n @Rule(AS.v << SpendingInput(data__entertainment_spending = MATCH.data__entertainment_spending),\n TEST(lambda data__entertainment_spending: data__entertainment_spending >= 0))\n def reward_entertainment(self):\n self.reward_entertainment = entertainment_spending_ // entertainment_points_lot * entertainment_points_multiplier\n print(\"%f points were earned from entertainment\" %(self.reward_entertainment))\n \n # Reward for foreign spending\n @Rule(AS.v << SpendingInput(data__foreign_spending = MATCH.data__foreign_spending),\n TEST(lambda data__foreign_spending: data__foreign_spending >= 0))\n def reward_foreign(self):\n self.reward_foreign = foreign_spending_ // foreign_points_lot * foreign_points_multiplier\n print(\"%f points were earned from foreign spending\" %(self.reward_foreign))\n \n # Reward for online_shopping_others\n @Rule(AS.v << SpendingInput(data__online_shopping_others_spending = MATCH.data__online_shopping_others_spending),\n TEST(lambda data__online_shopping_others_spending: data__online_shopping_others_spending >= 0))\n def reward_online_shopping_others(self):\n self.reward_online_shopping_others = online_shopping_others_spending_ // online_shopping_others_points_lot * online_shopping_others_points_multiplier\n print(\"%f points were earned from online_shopping_others\" %(self.reward_online_shopping_others))\n \n # Reward for online_shopping_hotels_and_flight\n @Rule(AS.v << SpendingInput(data__online_shopping_hotels_and_flight_spending = MATCH.data__online_shopping_hotels_and_flight_spending),\n TEST(lambda data__online_shopping_hotels_and_flight_spending: data__online_shopping_hotels_and_flight_spending >= 0))\n def reward_online_shopping_hotels_and_flight(self):\n self.reward_online_shopping_hotels_and_flight = online_shopping_hotels_and_flight_spending_ // online_shopping_hotels_and_flights_points_lot * online_shopping_hotels_and_flight_points_multiplier\n print(\"%f points were earned from online_shopping_hotels_and_flight\" %(self.reward_online_shopping_hotels_and_flight))\n \n # Reward for retail_spending\n @Rule(AS.v << SpendingInput(data__retail_shopping_spending = MATCH.data__retail_shopping_spending),\n TEST(lambda data__retail_shopping_spending: data__retail_shopping_spending >= 0))\n def reward_retail(self, v):\n self.reward_retail = retail_shopping_spending_ // retail_shopping_points_lot * retail_shopping_points_multiplier\n print(\"%f points were earned from retail_spending\" %(self.reward_retail))\n \n engine=Reward()\n engine.reset()\n engine.declare(SpendingInput(data=dict_of_spending_amounts_info))\n engine.run()\n engine.facts\n \n reward_value = engine.reward_dining \\\n + engine.reward_entertainment \\\n + engine.reward_foreign \\\n + engine.reward_online_shopping_others \\\n + engine.reward_online_shopping_hotels_and_flight \\\n + engine.reward_retail\n \n ##check contactless overall\n reward_contactless = 0\n potential_reward_contactless = total_spending//contactless_points_lot*contactless_CF\n if potential_reward_contactless <= contactless_points_cap:\n reward_contactless = potential_reward_contactless\n else:\n reward_contactless = contactless_points_cap\n print(\"%f points were earned from contactless spending\" %(reward_contactless))\n\n reward_value = reward_value + reward_contactless\n \n return min(overall_points_cap, reward_value)\n\ndef fuzzy_logic_convert_points_to_cashback_value(reward_value, bank_name):\n if bank_name == 'dbs':\n cashback_equivalent = DBS_points_to_cashback(reward_value)\n elif bank_name == 'citibank':\n cashback_equivalent = citibank_points_to_cashback(reward_value)\n elif bank_name == 'standard chartered':\n cashback_equivalent = standardchartered_points_to_cashback(reward_value)\n elif bank_name == 'uob':\n cashback_equivalent = uob_points_to_cashback(reward_value)\n elif bank_name == 'maybank':\n cashback_equivalent = maybank_points_to_cashback(reward_value)\n elif bank_name == 'hsbc':\n cashback_equivalent = HSBC_points_to_cashback(reward_value)\n elif bank_name == 'ocbc':\n cashback_equivalent = ocbc_points_to_cashback(reward_value)\n else:\n print(\"NONE OF THEM\")\n cashback_equivalent = -1\n return cashback_equivalent\n\ndef fuzzy_logic_convert_miles_to_cashback_value(reward_value, points_to_miles_conversion, bank_name):\n if bank_name == 'dbs':\n cashback_equivalent = DBS_miles_to_cashback(reward_value*points_to_miles_conversion)\n elif bank_name == 'citibank':\n cashback_equivalent = citibank_miles_to_cashback(reward_value*points_to_miles_conversion)\n elif bank_name == 'standard chartered':\n cashback_equivalent = standardchartered_miles_to_cashback(reward_value*points_to_miles_conversion)\n elif bank_name == 'uob':\n cashback_equivalent = uob_miles_to_cashback(reward_value*points_to_miles_conversion)\n elif bank_name == 'maybank':\n cashback_equivalent = maybank_miles_to_cashback(reward_value*points_to_miles_conversion)\n elif bank_name == 'hsbc':\n cashback_equivalent = HSBC_miles_to_cashback(reward_value*points_to_miles_conversion)\n elif bank_name == 'ocbc':\n cashback_equivalent = ocbc_miles_to_cashback(reward_value*points_to_miles_conversion)\n else:\n print(\"NONE OF THEM\")\n cashback_equivalent = -1\n return cashback_equivalent\n\ndef return_compare_by_preference(points_to_miles_conversion, points_split_ratio, cashback_value, reward_value, bank_name, dict_of_cashback_points_miles_preference_info, debug=False):\n preferred_rewards_type = dict_of_cashback_points_miles_preference_info['preferred_rewards_type']\n \n ## Calculate for only ONE preference: cashback/ points/ miles ##\n if set(preferred_rewards_type) == set(['cashback']):\n if debug: print(\"Cashback Preference, %f\" %(cashback_value))\n return cashback_value\n elif set(preferred_rewards_type) == set(['points']):\n if debug: print(\"Points Preference, %f\" %(reward_value))\n return reward_value\n elif set(preferred_rewards_type) == set(['miles']):\n if debug: print(\"Miles Preference, %f\" %(reward_value*points_to_miles_conversion))\n return reward_value*points_to_miles_conversion\n points_cashback_equivalent = fuzzy_logic_convert_points_to_cashback_value(reward_value, bank_name)\n miles_cashback_equivalent = fuzzy_logic_convert_miles_to_cashback_value(reward_value*points_to_miles_conversion, points_to_miles_conversion, bank_name)\n\n ## Calculate for TWO preference: any 2 out of cashback/ points/ miles ##\n if set(preferred_rewards_type) == set(['cashback', 'points']):\n total_cash_val_equivalent = cashback_value + points_cashback_equivalent\n if debug: print(\"Cashback & Points Preference, %f\" %(total_cash_val_equivalent))\n return total_cash_val_equivalent\n elif set(preferred_rewards_type) == set(['cashback', 'miles']):\n total_cash_val_equivalent = cashback_value + points_split_ratio*miles_cashback_equivalent\n if debug: print(\"Cashback & Miles Preference, %f\" %(total_cash_val_equivalent))\n return total_cash_val_equivalent\n elif set(preferred_rewards_type) == set(['points', 'miles']):\n total_cash_val_equivalent = points_split_ratio*points_cashback_equivalent + (1 - points_split_ratio)*miles_cashback_equivalent\n if debug: print(\"Points & Miles Preference, %f\" %(total_cash_val_equivalent))\n return total_cash_val_equivalent\n\n ## Calculate for THREE preference: all three of cashback/ points/ miles ##\n if set(preferred_rewards_type) == set(['cashback', 'points', 'miles']):\n total_cash_val_equivalent = cashback_value + points_split_ratio*points_cashback_equivalent + (1 - points_split_ratio)*miles_cashback_equivalent\n if debug: print(\"Cashback & Points & Miles Preference, %f\" %(total_cash_val_equivalent))\n return total_cash_val_equivalent\n\ndef return_best_credit_card(dict_of_spending_amounts_info, dict_of_credit_card_spending_rewards_info, dict_of_cashback_points_miles_preference_info, debug=False):\n\n points_split_ratio = 0.5\n contactless_CF=0.75\n\n ## Get all the pertinent info from all the credit cards in this set ##\n credit_card_pertinent_info = []\n for row in dict_of_credit_card_spending_rewards_info:\n cardid = str(row['credit_card_id'][0])\n credit_card_name = str(row['credit_card_name'][0])\n official_link = str(row['official_link'][0])\n points_to_miles_conversion = row['points_to_miles_conversion'][0]\n annual_fee = row['annual_fee'][0]\n bank_name = row['bank_name'][0]\n card_type = row['card_type']\n print(card_type)\n print(\"\\n\\n\") \n \n cashback_value = return_cashback_value(dict_of_spending_amounts_info, row, contactless_CF, debug)\n reward_value = return_reward_value(dict_of_spending_amounts_info, row, contactless_CF, debug)\n\n total_cash_val_equivalent = return_compare_by_preference(points_to_miles_conversion, points_split_ratio, cashback_value, reward_value, bank_name, dict_of_cashback_points_miles_preference_info, debug)\n\n if debug:\n print(\"cardid, credit_card_name, official_link, points_to_miles_conversion, points_split_ratio, cashback_value, reward_value, total_cash_val_equivalent\")\n print(credit_card_name, official_link, points_to_miles_conversion, points_split_ratio, cashback_value, reward_value, total_cash_val_equivalent)\n \n credit_card_name = credit_card_name\n credit_card_official_link = official_link\n cashback_amount = 0\n points_amount = 0\n miles_amount = 0\n annual_fee = annual_fee\n total_cash_val_equivalent = total_cash_val_equivalent\n \n ## Calculate for only ONE preference: cashback/ points/ miles ##\n if set(card_type) == set(['cashback']):\n cashback_amount = cashback_value\n elif set(card_type) == set(['points']):\n if debug: print(\"Points Preference, %f\" %(reward_value))\n points_amount = reward_value\n elif set(card_type) == set(['miles']):\n if debug: print(\"Miles Preference, %f\" %(reward_value*points_to_miles_conversion))\n miles_amount = reward_value*points_to_miles_conversion\n\n ## Calculate for TWO preference: any 2 out of cashback/ points/ miles ##\n if set(card_type) == set(['cashback', 'points']):\n cashback_amount = cashback_value\n points_amount = reward_value\n elif set(card_type) == set(['cashback', 'miles']):\n cashback_amount = cashback_value\n miles_amount = reward_value\n elif set(card_type) == set(['points', 'miles']):\n points_amount = points_split_ratio*reward_value \n miles_amount = (1 - points_split_ratio)*reward_value\n\n ## Calculate for THREE preference: all three of cashback/ points/ miles ##\n if set(card_type) == set(['cashback', 'points', 'miles']):\n cashback_amount = cashback_value\n points_amount = points_split_ratio*reward_value \n miles_amount = (1 - points_split_ratio)*reward_value\n \n credit_card_pertinent_info.append([credit_card_name, official_link, cashback_amount, points_amount, miles_amount, annual_fee, total_cash_val_equivalent])\n \n ## Sort the best credit card in this set and return it in values ##\n best_total_cash_val_equivalent = max([x[-1] for x in credit_card_pertinent_info])\n best_credit_cards = [x for x in credit_card_pertinent_info if x[-1] == best_total_cash_val_equivalent]\n if debug:\n print(credit_card_pertinent_info)\n print(\"The best Cash Value equivalent is %f\" %(best_total_cash_val_equivalent))\n print(\"The best Credit Cards are %r\" %(best_credit_cards))\n if len(best_credit_cards) == 1: # If there is only one best Credit Card\n return best_credit_cards[0]\n return best_credit_cards[0]\n"
},
{
"alpha_fraction": 0.5330073237419128,
"alphanum_fraction": 0.6112469434738159,
"avg_line_length": 21.72222137451172,
"blob_id": "f66a355209b696070661a35ed3e1ead72541bd7c",
"content_id": "cb9f22833f1afd309f1263fb1d39429bde3e4e7e",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 409,
"license_type": "no_license",
"max_line_length": 62,
"num_lines": 18,
"path": "/SystemCode/Recommender/migrations/0003_auto_20190224_1116.py",
"repo_name": "IRS-MR/IRS-MR-2019-01-19-IS1PT-GRP-MRCard",
"src_encoding": "UTF-8",
"text": "# Generated by Django 2.1.5 on 2019-02-24 11:16\n\nfrom django.db import migrations, models\n\n\nclass Migration(migrations.Migration):\n\n dependencies = [\n ('Recommender', '0002_auto_20190224_0903'),\n ]\n\n operations = [\n migrations.AlterField(\n model_name='creditcards',\n name='eligible',\n field=models.PositiveSmallIntegerField(default=2),\n ),\n ]\n"
},
{
"alpha_fraction": 0.5573539733886719,
"alphanum_fraction": 0.5714285969734192,
"avg_line_length": 38.47222137451172,
"blob_id": "29cbccb3a035721845efc300900c59f00de9e32a",
"content_id": "27f85a8aa97f26704841ee5c79707709d74606d0",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 1421,
"license_type": "no_license",
"max_line_length": 124,
"num_lines": 36,
"path": "/SystemCode/Recommender/migrations/0001_initial.py",
"repo_name": "IRS-MR/IRS-MR-2019-01-19-IS1PT-GRP-MRCard",
"src_encoding": "UTF-8",
"text": "# Generated by Django 2.1.5 on 2019-02-24 07:43\n\nfrom django.db import migrations, models\n\n\nclass Migration(migrations.Migration):\n\n initial = True\n\n dependencies = [\n ]\n\n operations = [\n migrations.CreateModel(\n name='CreditCards',\n fields=[\n ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),\n ('annual_income_singaporean_min', models.PositiveIntegerField()),\n ('annual_income_pr_min', models.PositiveIntegerField()),\n ('annual_income_foreigner_min', models.PositiveIntegerField()),\n ('age_min', models.PositiveSmallIntegerField()),\n ('age_max', models.PositiveSmallIntegerField()),\n ('total_spending_amount_min', models.PositiveIntegerField()),\n ],\n ),\n migrations.CreateModel(\n name='Person',\n fields=[\n ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),\n ('age', models.PositiveSmallIntegerField()),\n ('citizenship', models.CharField(choices=[(1, 'Singaporean'), (2, 'PR'), (3, 'Foreigner')], max_length=12)),\n ('annual_income', models.PositiveIntegerField()),\n ('total_spending_amount', models.PositiveIntegerField()),\n ],\n ),\n ]\n"
},
{
"alpha_fraction": 0.7278911471366882,
"alphanum_fraction": 0.7823129296302795,
"avg_line_length": 28.399999618530273,
"blob_id": "1b4400c6cb33742d365ab057a569169e91cbe3f8",
"content_id": "88426c54404f50ee242d14bfd1533dd3a8ee8930",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 147,
"license_type": "no_license",
"max_line_length": 72,
"num_lines": 5,
"path": "/SystemCode/venv/MRCard-env/bin/2to3-3.6",
"repo_name": "IRS-MR/IRS-MR-2019-01-19-IS1PT-GRP-MRCard",
"src_encoding": "UTF-8",
"text": "#!/home/iss-user/MTech19/MRCard/SystemCode/venv/MRCard-env/bin/python3.6\nimport sys\nfrom lib2to3.main import main\n\nsys.exit(main(\"lib2to3.fixes\"))\n"
},
{
"alpha_fraction": 0.48934197425842285,
"alphanum_fraction": 0.5560704469680786,
"avg_line_length": 17.160715103149414,
"blob_id": "367dd368f309e263411fcd1f27fcf5d24e7a0ee7",
"content_id": "3c89595a1b3c7e19730ab08bf0a136086871132d",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 1079,
"license_type": "no_license",
"max_line_length": 44,
"num_lines": 56,
"path": "/SystemCode/Recommender/fuzzy_logic.py",
"repo_name": "IRS-MR/IRS-MR-2019-01-19-IS1PT-GRP-MRCard",
"src_encoding": "UTF-8",
"text": "def DBS_points_to_cashback(x):\r\n return x/5\r\n \r\ndef DBS_miles_to_cashback(x):\r\n if x < 10000:\r\n return x/2.5\r\n else:\r\n return 4000\r\n\r\ndef citibank_points_to_cashback(x):\r\n return x/440\r\n\r\ndef citibank_miles_to_cashback(x):\r\n return x/165\r\n\r\ndef standardchartered_points_to_cashback(x):\r\n return x/320\r\n\r\ndef standardchartered_miles_to_cashback(x):\r\n return x/128\r\n\r\ndef uob_points_to_cashback(x):\r\n if x < 1000:\r\n return 0.1*x\r\n else:\r\n return 100\r\n\r\ndef uob_miles_to_cashback(x):\r\n if x < 1000:\r\n return 0.1*x\r\n else:\r\n return 100\r\n \r\ndef maybank_points_to_cashback(x):\r\n if x < 15000:\r\n return x/300\r\n else:\r\n return 50\r\n \r\ndef maybank_miles_to_cashback(x):\r\n if x < 6000:\r\n return x/750\r\n else:\r\n return 50\r\n \r\ndef HSBC_points_to_cashback(x):\r\n return x/300\r\n \r\ndef HSBC_miles_to_cashback(x):\r\n return x/120\r\n \r\ndef ocbc_points_to_cashback(x):\r\n return x/350\r\n \r\ndef ocbc_miles_to_cashback(x):\r\n return x*2.5\r\n \r\n"
},
{
"alpha_fraction": 0.651123583316803,
"alphanum_fraction": 0.651123583316803,
"avg_line_length": 25.522388458251953,
"blob_id": "038d7390f6ee65571abab82b1eb093efb887948d",
"content_id": "9adac166b482ee126fffda3f1897d01c72ad9bb2",
"detected_licenses": [
"Python-2.0"
],
"is_generated": false,
"is_vendor": false,
"language": "reStructuredText",
"length_bytes": 1780,
"license_type": "permissive",
"max_line_length": 89,
"num_lines": 67,
"path": "/SystemCode/venv/MRCard-env/lib/python3.6/site-packages/pyknow-1.7.0.dist-info/DESCRIPTION.rst",
"repo_name": "IRS-MR/IRS-MR-2019-01-19-IS1PT-GRP-MRCard",
"src_encoding": "UTF-8",
"text": "PyKnow: Expert Systems for Python\n=================================\n\n.. image:: https://img.shields.io/pypi/v/pyknow.svg\n :target: https://pypi.python.org/pypi/pyknow\n\n.. image:: https://img.shields.io/pypi/pyversions/pyknow.svg\n :target: https://pypi.python.org/pypi/pyknow\n\n.. image:: https://travis-ci.org/buguroo/pyknow.svg?branch=master\n :target: https://travis-ci.org/buguroo/pyknow\n\n.. image:: https://readthedocs.org/projects/pyknow/badge/?version=stable\n :target: https://readthedocs.org/projects/pyknow/?badge=stable\n :alt: Documentation Status\n\n.. image:: https://codecov.io/gh/buguroo/pyknow/branch/develop/graph/badge.svg\n :target: https://codecov.io/gh/buguroo/pyknow\n :alt: codecov.io\n\n\nPyKnow is a Python library for building expert systems strongly inspired\nby CLIPS_.\n\n.. code-block:: python\n\n from random import choice\n from pyknow import *\n\n\n class Light(Fact):\n \"\"\"Info about the traffic light.\"\"\"\n pass\n\n\n class RobotCrossStreet(KnowledgeEngine):\n @Rule(Light(color='green'))\n def green_light(self):\n print(\"Walk\")\n\n @Rule(Light(color='red'))\n def red_light(self):\n print(\"Don't walk\")\n\n @Rule(AS.light << Light(color=L('yellow') | L('blinking-yellow')))\n def cautious(self, light):\n print(\"Be cautious because light is\", light[\"color\"])\n\n\n.. code-block:: python\n\n >>> engine = RobotCrossStreet()\n >>> engine.reset()\n >>> engine.declare(Light(color=choice(['green', 'yellow', 'blinking-yellow', 'red'])))\n >>> engine.run()\n Be cautious because light is blinking-yellow\n\n\nYou can find some more examples on GitHub_.\n\n.. _CLIPS: http://clipsrules.sourceforge.net\n.. _GitHub: https://github.com/buguroo/pyknow/tree/develop/docs\n\n\n\nTODO\n====\n\n\n\n"
},
{
"alpha_fraction": 0.7159090638160706,
"alphanum_fraction": 0.7272727489471436,
"avg_line_length": 34.20000076293945,
"blob_id": "57ce1c649968fe3983e0c7869835f8b39186465a",
"content_id": "ab40e5d2c442748562b66b8b48489deec18d55fd",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 176,
"license_type": "no_license",
"max_line_length": 69,
"num_lines": 5,
"path": "/SystemCode/venv/MRCard-env/bin/django-admin.py",
"repo_name": "IRS-MR/IRS-MR-2019-01-19-IS1PT-GRP-MRCard",
"src_encoding": "UTF-8",
"text": "#!/home/iss-user/MTech19/MRCard/SystemCode/venv/MRCard-env/bin/python\nfrom django.core import management\n\nif __name__ == \"__main__\":\n management.execute_from_command_line()\n"
},
{
"alpha_fraction": 0.7521252632141113,
"alphanum_fraction": 0.7586129903793335,
"avg_line_length": 30.70212745666504,
"blob_id": "2e1705bc7ac82798f6b3a8e8abea55b04e1f5e20",
"content_id": "8b597f4e81c5ab166e666d145d7ff5821646978c",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 4470,
"license_type": "no_license",
"max_line_length": 127,
"num_lines": 141,
"path": "/SystemCode/Recommender/config.py",
"repo_name": "IRS-MR/IRS-MR-2019-01-19-IS1PT-GRP-MRCard",
"src_encoding": "UTF-8",
"text": "#configurations, maybe it shouldn't be here, but meh\ncredit_card_eligibility_list = ['credit_card_id',\n'credit_card_name',\n'multiple_levels',\n'bank_name',\n'payment_networks',\n'age_min',\n'age_max',\n'gender_req',\n'annual_income_singaporean_min',\n'annual_income_pr_min',\n'annual_income_foreigner_min']\ncredit_card_preference_list = ['credit_card_id',\n'credit_card_name',\n'bank_name',\n'card_type',\n'payment_networks']\ncredit_card_spending_rewards_list = ['credit_card_id',\n'credit_card_name',\n'bank_name',\n'card_type',\n'multiple_levels',\n'official_link',\n'foreign_currency_transaction_fee',\n'annual_fee',\n'annual_fee_waiver_min_spend',\n'overall_points_cap',\n'overall_points_min_spend',\n'contactless_points_multiplier',\n'contactless_points_cap',\n'contactless_points_lot',\n'dining_points_multiplier',\n'dining_points_cap',\n'dining_points_lot',\n'entertainment_points_multiplier',\n'entertainment_points_cap',\n'entertainment_points_lot',\n'foreign_points_multiplier',\n'foreign_points_cap',\n'foreign_points_lot',\n'online_shopping_others_points_multiplier',\n'online_shopping_others_points_cap',\n'online_shopping_others_points_lot',\n'online_shopping_hotels_and_flight_points_multiplier',\n'online_shopping_hotels_and_flights_points_cap',\n'online_shopping_hotels_and_flights_points_lot',\n'retail_shopping_points_multiplier',\n'retail_shopping_points_cap',\n'retail_shopping_points_lot',\n'points_to_miles_conversion',\n'overall_cashback_cap',\n'overall_cashback_min_spend',\n'cash_cashback',\n'bill_cashback_rate',\n'bill_cashback_cap',\n'bill_cashback_min_spend',\n'contactless_cashback_rate',\n'contactless_cashback_cap',\n'contactless_cashback_min_spend',\n'dining_cashback_rate',\n'dining_cashback_cap',\n'dining_cashback_min_spend',\n'foreign_cashback_rate',\n'foreign_cashback_cap',\n'foreign_cashback_min_spend',\n'groceries_overall_cashback_cap',\n'groceries_others_cashback_rate',\n'groceries_others_cashback_cap',\n'groceries_others_cashback_min_spend',\n'groceries_ntuc_cashback_rate',\n'groceries_ntuc_cashback_cap',\n'groceries_ntuc_cashback_min_spend',\n'groceries_sheng_siong_cashback_rate',\n'groceries_sheng_siong_cashback_cap',\n'groceries_sheng_siong_cashback_min_spend',\n'groceries_cold_storage_cashback_rate',\n'groceries_cold_storage_cashback_cap',\n'groceries_cold_storage_cashback_min_spend',\n'groceries_giant_cashback_rate',\n'groceries_giant_cashback_cap',\n'groceries_giant_cashback_min_spend',\n'online_shopping_overall_cashback_cap',\n'online_shopping_others_cashback_rate',\n'online_shopping_others_cashback_cap',\n'online_shopping_others_cashback_min_spend',\n'online_shopping_hotels_and_flights_cashback_rate',\n'online_shopping_hotels_and_flights_cashback_cap',\n'online_shopping_hotels_and_flights_cashback_min_spend',\n'petrol_overal_cashback_cap',\n'petrol_others_cashback_rate',\n'petrol_others_cashback_cap',\n'petrol_others_cashback_min_spend',\n'petrol_esso_cashback_rate',\n'petrol_esso_cashback_cap',\n'petrol_esso_cashback_min_spend',\n'petrol_caltex_cashback_rate',\n'petrol_caltex_cashback_cap',\n'petrol_caltex_cashback_min_spend',\n'petrol_shell_cashback_rate',\n'petrol_shell_cashback_cap',\n'petrol_shell_cashback_min_spend',\n'retail_shopping_cashback_rate',\n'retail_shopping_cashback_cap',\n'retail_shopping_cashback_min_spend',\n'transport_cashback_rate',\n'transport_cashback_cap',\n'transport_cashback_min_spend']\npersonal_info_default_dict = {'annual_income':0,\n'age':0,\n'gender':'M',\n'citizenship':'foreigner',\n'total_spending_amount':0}\npreference_info_default_dict = {'preferred_bank': ['dbs', 'ocbc', 'hsbc', 'citibank', 'maybank', 'uob', 'standard chartered'], \n'preferred_card_type': ['visa', 'mastercard', 'american express', 'jcb'], \n'preferred_rewards_type': ['cashback', 'miles', 'points']}\nspending_checkbox_default_dict = {'bill_checkbox':0,\n'dining_checkbox':0,\n'entertainment_checkbox':0,\n'foreign_checkbox':0,\n'groceries_checkbox':0,\n'online_shopping_checkbox':0,\n'petrol_checkbox':0,\n'retail_shopping_checkbox':0,\n'transport_checkbox':0}\nspending_amounts_default_dict = {'bill_spending':0,\n'dining_spending':0,\n'entertainment_spending':0,\n'foreign_spending':0,\n'groceries_others_spending':0,\n'groceries_ntuc_spending':0,\n'groceries_sheng_siong_spending':0,\n'groceries_cold_storage_spending':0,\n'groceries_giant_spending':0,\n'online_shopping_others_spending':0,\n'online_shopping_hotels_and_flight_spending':0,\n'petrol_others_spending':0,\n'petrol_esso_spending':0,\n'petrol_caltex_spending':0,\n'petrol_shell_spending':0,\n'retail_shopping_spending':0,\n'transport_spending':0}\n"
},
{
"alpha_fraction": 0.7361891865730286,
"alphanum_fraction": 0.7747625708580017,
"avg_line_length": 58.988372802734375,
"blob_id": "05465414c93c03e702db1cb9d61986ba9bba643c",
"content_id": "b6cce460581f56106bbcd6c41a73d42aaebddac3",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Markdown",
"length_bytes": 5159,
"license_type": "no_license",
"max_line_length": 639,
"num_lines": 86,
"path": "/README.md",
"repo_name": "IRS-MR/IRS-MR-2019-01-19-IS1PT-GRP-MRCard",
"src_encoding": "UTF-8",
"text": "# SECTION 1 : PROJECT TITLE\n### MRCard Recommender System\n<img width=\"812\" alt=\"welcome\" src=\"https://user-images.githubusercontent.com/48171290/54080819-80836a80-4333-11e9-9f1d-7f21123d454f.png\">\n\n# SECTION 2 : EXECUTIVE SUMMARY / PAPER ABSTRACT\nWith the income of working adults in singapore steadily rising over the years, many people are gaining access to credit cards, especially young working adults. The majority of adults nowadays own at least one or more credit cards, with many others planning to start using credit cards as well. Banks have also been actively coming up with more credit cards and trying and to get consumers to take them up. \n\nThere can be many advantages in having a credit card. One advantage is that credit card users can earn benefits in terms of rebates, air miles, and rewards. This is usually the main draw for people to use credit cards. However, not every card is suitable for everyone. Each card has its own requirements and rates, and whether the user can earn the benefits from the card largely depends on their lifestyle and spending habits. With many credit cards available from the banks in Singapore, it can be a time-consuming task to pick up a suitable credit card, and many people simply get cards where their potential benefits are not maximised.\n\nAs a group of 5 young working professionals, we felt that this was a very relevant issue. Hence, we came up with the idea of designing a recommendation system to recommend the most suitable credit card or saving account based on the applicant's personal background, spending habits and personal preferences.\n\nFor this project, we first set out to perform knowledge acquisition by interviewing a subject matter expert, and also conducting a survey. To build the system, we decided to utilise the Django web framework, for its ease of integration with the front-end user interface (done with HTML), and the back-end rules engine (PyKnow) that we used to perform rule-based reasoning.\n\nOur team learned a lot in the process of working on this project. We got the chance to apply techniques (like knowledge acquisition and rule-based reasoning) that we learned in our lectures and workshops in a viable business application scenario, and also picked up technical skills which would surely prove useful in the future course of our work.\n\n# SECTION 3 : CREDITS / PROJECT CONTRIBUTION\n\n| Official Full Name | Student ID (MTech Applicable)| Work Items (Who Did What) | Email (Optional) |\n| :---: | :---: | :---: | :---: |\n| LI DUO | A0195364W | Business idea generation, domain expert interview, reward rules implementation, project video and project report | [email protected] |\n| LIM CHONG SENG HERMANN | A0195392U | Business idea generation, project report and testing execution | [email protected] |\n| LU JIAHAO | A0091835Y | Business idea generation, UI design, domain expert interview, data clean, project report integration and testing execution | [email protected] |\n| YAM GUI PENG DAVID | A0195315A | Business idea generation, overall rules implementation, database and backend logic, overall integration and project report | [email protected] |\n| ZHAO YAZHI | A0195305E | Business idea generation, cashback rules implementation, survey result analysis, fuzzy logic implementation and project report | [email protected] |\n\n# SECTION 4 : VIDEO OF SYSTEM MODELLING & USE CASE DEMO\n[](https://www.youtube.com/watch?v=vu1eQ-0R4e8&feature=youtu.be)\n\n\n# SECTION 5 : USER GUIDE\n[ 1 ] To run the system in any machine with anaconda 3 installed\n\n$ git clone https://github.com/davidygp/IRS-MR-2019-01-19-IS1PT-GRP-MRCard\n\n$ cd ./IRS-MR-2019-01-19-IS1PT-GRP-MRCard/SystemCode\n\n$ source activate ./venv/MRCard-env\n\n(MRCard-env) $ python manage.py runserver\n\nGo to URL using web browser http://127.0.0.1:8000/\n\n$ (MRCard-env) $ source deactivate\n\n[ 2 ] To run the system in other/local machine: Install additional necessary libraries. This application works in python 3 only.\n\n$ pip install anaconda 3 \n\n$ git clone https://github.com/davidygp/IRS-MR-2019-01-19-IS1PT-GRP-MRCard\n\n$ cd ./IRS-MR-2019-01-19-IS1PT-GRP-MRCard/SystemCode\n\n$ source activate ./venv/MRCard-env\n\n(MRCard-env) $ python manage.py runserver\n\nGo to URL using web browser http://127.0.0.1:8000/\n\n$ (MRCard-env) $ source deactivate\n\n# SECTION 6 : PROJECT REPORT / PAPER\n<Github File Link> https://github.com/davidygp/IRS-MR-2019-01-19-IS1PT-GRP-MRCard/tree/master/ProjectReport/Report.pdf\n\nRecommended Sections for Project Report / Paper:\n\n+ Executive Summary / Paper Abstract\n+ Business Problem Background\n+ Project Objectives & Success Measurements\n+ Project Solution\n+ Project Performance & Validation\n+ Project Conclusions: Findings & Recommendation\n+ References\n\n# SECTION 7 : MISCELLANEOUS\nMRCard Survey Result.xlsx\n+ Results of survey\n+ Insights derived, which helped on features selection that are subsequently used in our system\n\nInterview with Hu Juan.mps\n+ Audio of the interview process with domain expert\n\nCard Data - Bank Card Data (Cleaned_v2).csv\n+ Data that used in the backend \n\nData Fields - Sheet1.csv\n+ Variables that used in the backend and rules\n"
},
{
"alpha_fraction": 0.6584597826004028,
"alphanum_fraction": 0.6603206396102905,
"avg_line_length": 47.342559814453125,
"blob_id": "f9026ddba67a1044d87696047fa28d2bdc99cce1",
"content_id": "8d36210231335379ec39c1b2d2d503e3c9085546",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 13972,
"license_type": "no_license",
"max_line_length": 177,
"num_lines": 289,
"path": "/SystemCode/Recommender/views.py",
"repo_name": "IRS-MR/IRS-MR-2019-01-19-IS1PT-GRP-MRCard",
"src_encoding": "UTF-8",
"text": "from django.shortcuts import render\n\n# Create your views here.\n\nfrom django.http import HttpResponse\nfrom .models import CreditCards\nfrom .config import *\nfrom .rules import *\n\ndebug = True\n\n# Functions used below\ndef retrieve_data_or_set_to_default(input_dict, default_dict, expecting_lists=False):\n ''' \n Given a dictionary of data (e.g. from the HTML POST request) and a default dictionary.\n For fields that are not in the dictionary of data but in the default dictionary, assign the new fields as the default.\n '''\n output_dict = {}\n for key in default_dict.keys():\n if expecting_lists:\n try: \n val = input_dict.getlist(key) # Check if key exists (for list)\n if len(val) == 0:\n output_dict[key] = default_dict[key]\n else:\n output_dict[key] = [convert_string_to_int_or_float(x) for x in val]\n except:\n output_dict[key] = default_dict[key]\n else:\n try:\n val = input_dict[key] # Check if key exists (for non-list)\n if len(val) == 0: # If it is empty\n output_dict[key] = default_dict[key]\n else:\n output_dict[key] = convert_string_to_int_or_float(val) \n except:\n output_dict[key] = default_dict[key] \n return output_dict\n\ndef convert_string_to_int_or_float(string):\n '''\n Converts the string which can contain a string, int or float, into a string, int or float\n (I'm sure there's a better way to do this.)\n '''\n try:\n output = int(string) \n return output\n except:\n try:\n output = float(string)\n return output\n except:\n return string.lower()\n return string.lower()\n \ndef retrieve_subset_out_of_query_set(query_set, list_of_keys):\n '''\n Takes a QuerySet object from django in the format [{}], i.e. list of dictionaries &\n a list of keys that forms a subset of the keys of the dictionaries\n\n Returns a list of dictionaries of the format [{}], that is a smaller subset\n '''\n subset_of_query_set = []\n for i in range(len(query_set)):\n sub_dictionary = {}\n for key in list_of_keys:\n item = query_set[i][key]\n try:\n processed_item = [convert_string_to_int_or_float(x) for x in item.split(',')]\n if key == 'credit_card_name':\n processed_item = [x for x in item.split(',')] # Don't lower for Credit Card Names\n except:\n if key == 'credit_card_name':\n processed_item = [item] # Don't lower for Credit Card Names\n else:\n processed_item = [convert_string_to_int_or_float(item)]\n sub_dictionary[key] = processed_item\n subset_of_query_set.append(sub_dictionary)\n return subset_of_query_set\n\ndef map_POST_to_session(request, expecting_lists=False):\n key_list = list(request.POST.keys())\n key_list.remove('csrfmiddlewaretoken')\n for key in key_list:\n if expecting_lists:\n request.session[key] = request.POST.getlist(key)\n else:\n request.session[key] = request.POST[key] \n\ndef return_subset_out_of_spending_rewards_info_by_cardid(list_of_dict_of_credit_card_spending_rewards_info, list_of_card_ids):\n subset_of_spending_rewards_info = []\n for credit_card in list_of_dict_of_credit_card_spending_rewards_info:\n if str(credit_card['credit_card_id'][0]) in list_of_card_ids or credit_card['credit_card_id'][0] in list_of_card_ids:\n subset_of_spending_rewards_info.append(credit_card)\n return subset_of_spending_rewards_info\n\n# Views Proper #\ndef eligibility(request):\n # This is the second html page\n return render(request, 'Recommender/eligibility.html')\n\ndef welcome(request):\n # This is the first html page\n return render(request, 'Recommender/welcome.html')\n\ndef test(request):\n # This is the test html page\n return render(request, 'Recommender/test.html')\n\ndef preferences(request):\n # This is the third html page\n map_POST_to_session(request) # Save the POST data into the session\n \n ### Process Data to determine eligibility ###\n ## Retrieve Personal eligibility info ##\n personal_info = retrieve_data_or_set_to_default(request.POST, personal_info_default_dict)\n ## Retrieve Credit Card eligibility related info ##\n all_credit_card_info = CreditCards.objects.values()\n credit_card_eligibility_info = retrieve_subset_out_of_query_set(all_credit_card_info, credit_card_eligibility_list)\n #print(credit_card_eligibility_info)\n if debug:\n print(\"---- Personal Info ----\")\n print(personal_info)\n print(\"---- Credit Card Eligibility Info ----\")\n print(credit_card_eligibility_info)\n ## Calculate the eligible Credit Cards here ##\n eligible_credit_card_ids = return_eligibile_credit_card_ids(personal_info, credit_card_eligibility_info) \n if debug:\n print(\"---- Eligible Credit Cards ----\")\n print(eligible_credit_card_ids)\n request.session['eligible_credit_card_ids'] = eligible_credit_card_ids['eligible_credit_card_ids']\n if len(eligible_credit_card_ids['eligible_credit_card_ids']) == 0:\n return render(request, 'Recommender/no_recommendation.html')\n else:\n return render(request, 'Recommender/preferences.html')\n\n\ndef spending_checkbox(request):\n # This is the fourth html page\n map_POST_to_session(request, True) # Save the POST data into the session\n \n ### Process Data to determine preference ###\n ## Retrieve Preference info ##\n print(request.POST)\n preference_info = retrieve_data_or_set_to_default(request.POST, preference_info_default_dict, True)\n ## Retrieve Credit Card preference related info ##\n all_credit_card_info = CreditCards.objects.values()\n credit_card_preference_info = retrieve_subset_out_of_query_set(all_credit_card_info, credit_card_preference_list)\n if debug:\n print(\"---- Preference Info ----\")\n print(preference_info)\n print(\"---- Credit Card Preference Info ----\")\n print(credit_card_preference_info)\n ## Calculate the preferred Credit Cards here ##\n preferred_credit_card_ids = return_preferred_credit_card_ids(preference_info, credit_card_preference_info)\n if debug:\n print(\"---- Preferred Credit Cards ----\")\n print(preferred_credit_card_ids)\n request.session['preferred_credit_card_ids'] = preferred_credit_card_ids['preferred_credit_card_ids']\n return render(request, 'Recommender/spending_checkbox.html')\n\n\ndef spending_amount(request):\n # This is the fifth html page\n map_POST_to_session(request) # Save the POST data into the session\n print(request.POST)\n print(\"Session\")\n print(request.session)\n ## Retrieve Spending Checkbox info ##\n spending_checkbox_info = retrieve_data_or_set_to_default(request.POST, spending_checkbox_default_dict)\n if debug:\n print(\"---- Spending Checkbox Info ----\")\n print(spending_checkbox_info)\n ## Assign what data to show in the spending_amount.html ##\n eligible_spending = return_eligible_spendings_for_breakdown(spending_checkbox_info)\n if debug:\n print(\"---- Spending Breakdown Info ----\")\n print(eligible_spending)\n #eligible_spending = {'eligible_spending':['bill','dining','groceries','transport']} # Get this from LD/YZ\n context = {\n 'eligible_spending': eligible_spending['eligible_spending']\n }\n return render(request, 'Recommender/spending_amount.html', context)\n\n\ndef recommendation(request):\n # This is the last html page\n map_POST_to_session(request) # Save the POST data into the session\n \n ## Retrieve Preference info ##\n rewards_type_preference_info = {'preferred_rewards_type': request.session['preferred_rewards_type']}\n ## Retrieve Eligible Credit Cards ##\n eligible_credit_card_ids = request.session['eligible_credit_card_ids']\n ## Retrieve Preferred Credit Cards ##\n preferred_credit_card_ids = request.session['preferred_credit_card_ids']\n ## Retrieve Spending Amounts info ##\n spending_amounts_info = retrieve_data_or_set_to_default(request.POST, spending_amounts_default_dict)\n ## Retrieve Credit Card cashback/miles/points info ##\n all_credit_card_info = CreditCards.objects.values()\n credit_card_spending_rewards_info = retrieve_subset_out_of_query_set(all_credit_card_info, credit_card_spending_rewards_list)\n ## Only provide to the Rules the cards that are Eligible and Eligible & Preferred ##\n eligible_and_preferred_credit_card_ids = [x for x in eligible_credit_card_ids if x in preferred_credit_card_ids]\n ideal_credit_card_spending_rewards_info = return_subset_out_of_spending_rewards_info_by_cardid(credit_card_spending_rewards_info, eligible_credit_card_ids)\n preferred_credit_card_spending_rewards_info = return_subset_out_of_spending_rewards_info_by_cardid(credit_card_spending_rewards_info, eligible_and_preferred_credit_card_ids)\n if debug:\n print(\"---- Prefered Rewards Type ----\")\n print(rewards_type_preference_info)\n print(\"---- Eligible Credit Card IDs ----\")\n print(eligible_credit_card_ids)\n print(\"---- Preferred Credit Card IDs ----\")\n print(preferred_credit_card_ids)\n print(\"---- Spending Amounts Info ----\")\n print(spending_amounts_info)\n print(\"---- Eligible and Preferred Credit Card IDs ----\")\n print(eligible_and_preferred_credit_card_ids)\n print(\"---- Ideal Credit Card Rewards Info ----\")\n print(ideal_credit_card_spending_rewards_info)\n print(\"---- Preferred Credit Cards Rewards Info ----\")\n print(preferred_credit_card_spending_rewards_info)\n ## Calculate the Ideal & Preferred Credit Card ##\n ideal_credit_card_list = return_best_credit_card(spending_amounts_info, ideal_credit_card_spending_rewards_info, {'preferred_rewards_type':['cashback', 'points', 'miles']})\n ideal_credit_card_name = ideal_credit_card_list[0]\n ideal_credit_card_official_link = ideal_credit_card_list[1]\n ideal_cashback_amount = ideal_credit_card_list[2]\n ideal_points_amount = ideal_credit_card_list[3]\n ideal_miles_amount = ideal_credit_card_list[4]\n ideal_annual_fee = ideal_credit_card_list[5]\n\n if len(preferred_credit_card_ids) == 0:\n preferred_credit_card_exists = 0\n preferred_credit_card_name = \"No available Credit Card\"\n preferred_credit_card_official_link = \"https://www.google.com\"\n preferred_cashback_amount = 0\n preferred_points_amount = 0\n preferred_miles_amount = 0\n preferred_annual_fee = 0\n else:\n preferred_credit_card_exists = 1\n preferred_credit_card_list = return_best_credit_card(spending_amounts_info, preferred_credit_card_spending_rewards_info, rewards_type_preference_info) \n preferred_credit_card_name = preferred_credit_card_list[0]\n preferred_credit_card_official_link = preferred_credit_card_list[1]\n preferred_cashback_amount = preferred_credit_card_list[2]\n preferred_points_amount = preferred_credit_card_list[3]\n preferred_miles_amount = preferred_credit_card_list[4]\n preferred_annual_fee = preferred_credit_card_list[5]\n if debug:\n print(\"---- Ideal Credit Card ----\")\n print(ideal_credit_card_list)\n print(\"---- Preferred Credit Card ----\")\n if len(preferred_credit_card_ids) >= 1:\n print(preferred_credit_card_list)\n elif len(preferred_credit_card_ids) == 0:\n print(\"There are no suitable Credit Cards based on user's preferences\")\n \n Recommendation = {'ideal_credit_card_name':ideal_credit_card_name,\n 'preferred_credit_card_name':preferred_credit_card_name,\n 'ideal_credit_card_official_link':ideal_credit_card_official_link,\n 'ideal_cashback_amount':ideal_cashback_amount,\n 'ideal_miles_amount':ideal_miles_amount,\n 'ideal_points_amount':ideal_points_amount,\n 'ideal_annual_fee':ideal_annual_fee,\n 'preferred_credit_card_exists':preferred_credit_card_exists,\n 'preferred_credit_card_official_link':preferred_credit_card_official_link,\n 'preferred_cashback_amount':preferred_cashback_amount,\n 'preferred_miles_amount':preferred_miles_amount,\n 'preferred_points_amount':preferred_points_amount,\n 'preferred_annual_fee':preferred_annual_fee}\n\n context = {\n 'ideal_credit_card_name':Recommendation['ideal_credit_card_name'],\n 'preferred_credit_card_name':Recommendation['preferred_credit_card_name'],\n 'ideal_credit_card_official_link':Recommendation['ideal_credit_card_official_link'],\n 'ideal_cashback_amount':Recommendation['ideal_cashback_amount'],\n 'ideal_miles_amount':Recommendation['ideal_miles_amount'],\n 'ideal_points_amount':Recommendation['ideal_points_amount'],\n 'ideal_annual_fee':Recommendation['ideal_annual_fee'],\n 'preferred_credit_card_exists':Recommendation['preferred_credit_card_exists'],\n 'preferred_credit_card_official_link':Recommendation['preferred_credit_card_official_link'],\n 'preferred_cashback_amount':Recommendation['preferred_cashback_amount'],\n 'preferred_miles_amount':Recommendation['preferred_miles_amount'],\n 'preferred_points_amount':Recommendation['preferred_points_amount'],\n 'preferred_annual_fee':Recommendation['preferred_annual_fee']\n }\n return render(request, 'Recommender/recommendation.html', context)\n\n\ndef no_recommendation(requests):\n # This is also the last html page (if there are no eligible Credit Cards)\n return render(request, 'Recommender/no_recommendation.html')\n\n"
},
{
"alpha_fraction": 0.5309090614318848,
"alphanum_fraction": 0.5309090614318848,
"avg_line_length": 27.779069900512695,
"blob_id": "f749d42be30c81fe4ab28afc7df8dfec9464f936",
"content_id": "528a58db84517e44519f785cae3549233921225e",
"detected_licenses": [
"Python-2.0"
],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 4950,
"license_type": "permissive",
"max_line_length": 78,
"num_lines": 172,
"path": "/SystemCode/venv/MRCard-env/lib/python3.6/site-packages/pyknow/fact.py",
"repo_name": "IRS-MR/IRS-MR-2019-01-19-IS1PT-GRP-MRCard",
"src_encoding": "UTF-8",
"text": "from itertools import chain\nfrom functools import lru_cache\nimport abc\nimport collections\n\nfrom schema import Schema\n\nfrom pyknow.pattern import Bindable\nfrom pyknow.utils import freeze, unfreeze\nfrom pyknow.conditionalelement import OperableCE\nfrom pyknow.conditionalelement import ConditionalElement\n\n\nclass BaseField(metaclass=abc.ABCMeta):\n @abc.abstractmethod\n def validate(self, data):\n \"\"\"Raise an exception on invalid data.\"\"\"\n pass\n\n\nclass Field(BaseField):\n\n NODEFAULT = object()\n\n def __init__(self, schema_definition, mandatory=False, default=NODEFAULT):\n self.validator = Schema(schema_definition)\n self.mandatory = mandatory\n self.default = default\n\n def validate(self, data):\n self.validator.validate(unfreeze(data))\n\n\nclass Validable(type):\n def __new__(mcl, name, bases, nmspc):\n\n # Register fields\n nmspc[\"__fields__\"] = {k: v\n for k, v in nmspc.items()\n if isinstance(v, BaseField)}\n\n return super(Validable, mcl).__new__(mcl, name, bases, nmspc)\n\n\nclass Fact(OperableCE, Bindable, dict, metaclass=Validable):\n \"\"\"Base Fact class\"\"\"\n\n def __init__(self, *args, **kwargs):\n self.update(dict(chain(enumerate(args), kwargs.items())))\n\n @lru_cache()\n def __missing__(self, key):\n if key not in self.__fields__:\n raise KeyError(key)\n else:\n default = self.__fields__[key].default\n if default is Field.NODEFAULT:\n raise KeyError(key)\n elif isinstance(default, collections.abc.Callable):\n return default()\n else:\n return default\n\n def __setitem__(self, key, value):\n if self.__factid__ is None:\n super().__setitem__(key, freeze(value))\n else:\n raise RuntimeError(\"A fact can't be modified after declaration.\")\n\n def validate(self):\n for name, field in self.__fields__.items():\n if name in self:\n try:\n field.validate(self[name])\n except Exception as exc:\n raise ValueError(\n \"Invalid value on field %r for fact %r\"\n % (name, self))\n elif field.mandatory:\n raise ValueError(\n \"Mandatory field %r is not defined for fact %r\"\n % (name, self))\n else:\n pass\n\n def update(self, mapping):\n for k, v in mapping.items():\n self[k] = v\n\n def as_dict(self):\n \"\"\"Return a dictionary containing this `Fact` data.\"\"\"\n return {k: unfreeze(v)\n for k, v in self.items()\n if not self.is_special(k)}\n\n def copy(self):\n \"\"\"Return a copy of this `Fact`.\"\"\"\n content = [(k, v) for k, v in self.items()]\n\n intidx = [(k, v) for k, v in content if isinstance(k, int)]\n args = [v for k, v in sorted(intidx)]\n\n kwargs = {k: v\n for k, v in content\n if not isinstance(k, int) and not self.is_special(k)}\n return self.__class__(*args, **kwargs)\n\n def has_field_constraints(self):\n return any(isinstance(v, ConditionalElement) for v in self.values())\n\n def has_nested_accessor(self):\n return any((\"__\" in str(k).strip('__') for k in self.keys()))\n\n @staticmethod\n def is_special(key):\n return (isinstance(key, str)\n and key.startswith('__')\n and key.endswith('__'))\n\n @property\n def __bind__(self):\n return self.get('__bind__', None)\n\n @__bind__.setter\n def __bind__(self, value):\n super().__setitem__('__bind__', value)\n\n @property\n def __factid__(self):\n return self.get('__factid__', None)\n\n @__factid__.setter\n def __factid__(self, value):\n super().__setitem__('__factid__', value)\n\n @classmethod\n def from_iter(cls, pairs):\n obj = cls()\n obj.update(dict(pairs))\n return obj\n\n def __str__(self): # pragma: no cover\n if self.__factid__ is None:\n return \"<Undeclared Fact> %r\" % self\n else:\n return \"<f-%d>\" % self.__factid__\n\n def __repr__(self): # pragma: no cover\n return \"{}({})\".format(\n self.__class__.__name__,\n \", \".join(\n (repr(v) if isinstance(k, int) else \"{}={!r}\".format(k, v)\n for k, v in self.items()\n if not self.is_special(k))))\n\n def __hash__(self):\n try:\n return self._hash\n except AttributeError:\n self._hash = hash(frozenset(self.items()))\n return self._hash\n\n def __eq__(self, other):\n return (self.__class__ == other.__class__\n and super().__eq__(other))\n\n\nclass InitialFact(Fact):\n \"\"\"\n InitialFact\n \"\"\"\n pass\n"
}
] | 18 |
worldofdub/aws-support-tools
|
https://github.com/worldofdub/aws-support-tools
|
e8e8bf4e62edae3ace7b6e9ef9b735e990ac8da5
|
e0b384490dcfb8e257fbccff89d9adbbadaf7f5b
|
69552fc7aa2b551071307ce8077ffd9cfc1828b2
|
refs/heads/master
| 2021-05-05T05:28:45.899508 | 2017-12-29T21:04:18 | 2017-12-29T21:04:18 | null | 0 | 0 | null | null | null | null | null |
[
{
"alpha_fraction": 0.6481121182441711,
"alphanum_fraction": 0.6531724333763123,
"avg_line_length": 29.951807022094727,
"blob_id": "a0cadc6ffaed6c9f697430756b5fdb371761d06a",
"content_id": "e7a03a6f3151d75e332406cd4fd920a96e601ab9",
"detected_licenses": [
"Apache-2.0"
],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 2569,
"license_type": "permissive",
"max_line_length": 97,
"num_lines": 83,
"path": "/GuardDuty/Defender/cfn/src/sg.py",
"repo_name": "worldofdub/aws-support-tools",
"src_encoding": "UTF-8",
"text": "\"\"\"Created by: David Pigliavento\"\"\"\n\n# Copyright 2017 Amazon.com, Inc. or its affiliates. All Rights Reserved.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\"). You may not use this file\n# except in compliance with the License. A copy of the License is located at\n#\n# http://aws.amazon.com/apache2.0/\n#\n# or in the \"license\" file accompanying this file. This file is distributed on an \"AS IS\"\n# BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the\n# License for the specific language governing permissions and limitations under the License.\n\nimport boto3\nimport os\n\nSECURITY_GROUP_NAME = 'guard-duty-lock-down'\nSECURITY_GROUP_DESCRIPTION = 'restricts inbound/outbound access for compromised instance'\n\n# AWS_DEFAULT_REGION is available during lambda execution to get current region\nAWS_DEFAULT_REGION = os.getenv(\"AWS_DEFAULT_REGION\")\n\nclient = boto3.client('ec2', region_name=AWS_DEFAULT_REGION)\n\n\ndef describe_security_group(name, vpc_id):\n response = client.describe_security_groups(\n Filters=[\n {\n 'Name': 'group-name',\n 'Values': [\n name,\n ]\n },\n {\n 'Name': 'vpc-id',\n 'Values': [\n vpc_id,\n ]\n }\n ]\n )\n\n if len(response['SecurityGroups']) != 1:\n return None\n\n return response['SecurityGroups'][0]\n\n\ndef create_security_group(description, name, vpc_id):\n response = client.create_security_group(\n Description=description,\n GroupName=name,\n VpcId=vpc_id\n )\n group_id = response['GroupId']\n\n return group_id\n\n\ndef revoke_security_group_egress(security_group):\n group_id = security_group['GroupId']\n ip_permissions = security_group['IpPermissionsEgress']\n\n response = client.revoke_security_group_egress(\n GroupId=group_id,\n IpPermissions=ip_permissions\n )\n\n\ndef get_lockdown_security_group(vpc_id):\n lock_down_security_group = describe_security_group(SECURITY_GROUP_NAME, vpc_id)\n\n if lock_down_security_group == None:\n print('Creating security group {}'.format(SECURITY_GROUP_NAME))\n\n group_id = create_security_group(SECURITY_GROUP_DESCRIPTION, SECURITY_GROUP_NAME, vpc_id)\n lock_down_security_group = describe_security_group(SECURITY_GROUP_NAME, vpc_id)\n revoke_security_group_egress(lock_down_security_group)\n\n print('Created security group {}'.format(group_id))\n\n return lock_down_security_group['GroupId']\n"
},
{
"alpha_fraction": 0.704954981803894,
"alphanum_fraction": 0.7145270109176636,
"avg_line_length": 33.153846740722656,
"blob_id": "bd6e2d624d578b4d282d5a5e7e4476faa44abbd8",
"content_id": "b7a970bdc956a83af06294ddbdc700afdc0f306e",
"detected_licenses": [
"Apache-2.0"
],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 1776,
"license_type": "permissive",
"max_line_length": 104,
"num_lines": 52,
"path": "/GuardDuty/Defender/cfn/src/ec2.py",
"repo_name": "worldofdub/aws-support-tools",
"src_encoding": "UTF-8",
"text": "\"\"\"Created by: David Pigliavento\"\"\"\n\n# Copyright 2017 Amazon.com, Inc. or its affiliates. All Rights Reserved.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\"). You may not use this file\n# except in compliance with the License. A copy of the License is located at\n#\n# http://aws.amazon.com/apache2.0/\n#\n# or in the \"license\" file accompanying this file. This file is distributed on an \"AS IS\"\n# BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the\n# License for the specific language governing permissions and limitations under the License.\n\nimport boto3\nimport os\n\n# AWS_DEFAULT_REGION is available during lambda execution to get current region\nAWS_DEFAULT_REGION = os.getenv(\"AWS_DEFAULT_REGION\")\n\nclient = boto3.client('ec2', region_name=AWS_DEFAULT_REGION)\n\n\ndef set_network_interface_security_group(interface_id, security_group):\n response = client.modify_network_interface_attribute(\n Groups=[\n security_group,\n ],\n NetworkInterfaceId=interface_id\n )\n\n\ndef describe_ec2_instance(instance_id):\n response = client.describe_instances(\n InstanceIds=[\n instance_id,\n ]\n )\n return response['Reservations'][0]['Instances'][0]\n\n\ndef get_instance_vpc(instance_id):\n instance = describe_ec2_instance(instance_id)\n return instance['VpcId']\n\n\ndef lock_down_ec2_instance(instance_id, lock_down_security_group):\n instance = describe_ec2_instance(instance_id)\n\n for interface in instance['NetworkInterfaces']:\n interface_id = interface['NetworkInterfaceId']\n set_network_interface_security_group(interface_id, lock_down_security_group)\n print('Setting inteface {} security group to {}'.format(interface_id, lock_down_security_group))\n"
},
{
"alpha_fraction": 0.7862595319747925,
"alphanum_fraction": 0.790839672088623,
"avg_line_length": 36.78845977783203,
"blob_id": "92b8b620e6e15656515aac84b0bf11e64fef334f",
"content_id": "bcbf8cced651b12e5c16649527e61fde8153baa1",
"detected_licenses": [
"Apache-2.0"
],
"is_generated": false,
"is_vendor": false,
"language": "Markdown",
"length_bytes": 1965,
"license_type": "permissive",
"max_line_length": 371,
"num_lines": 52,
"path": "/GuardDuty/Defender/README.md",
"repo_name": "worldofdub/aws-support-tools",
"src_encoding": "UTF-8",
"text": "# GuardDuty Defender\n\nThis project provides an example Lambda function to action GuardDuty findings.\n\nIncluded is a Cloudformation template to deploy the Lambda function and associated CloudWatch event rule.\n\n### How it Works\n\nThe Lambda function is invoked when new GuardDuty findings are generated. When a GuardDuty finding indicates an EC2 instance is the ACTOR for a High severity finding, all existing security groups will be removed from the instance and a restricted security group is assigned. This retains the running state of the instance for forensics while mitigating the risk it poses.\n\nThe restricted security group will be automatically created, and all ingress and egress traffic will be denied.\n\nThe template also creates an SNS topic that you can subscribe to for email notifications when any action is taken.\n\n### Cloudformation Template Parameters\n|Parameter|Purpose|\n|---------|-------|\n|Role|ARN of the IAM role that provides Lambda execution access|\n\n### Permissions\n\nThe role used by Defender requires the following IAM permissions:\n- logs:CreateLogStream\n- logs:PutLogEvents\n- ec2:Describe*\n- ec2:AuthorizeSecurityGroupEgress\n- ec2:AuthorizeSecurityGroupIngress\n- ec2:CreateSecurityGroup\n- ec2:RevokeSecurityGroupEgress\n- ec2:RevokeSecurityGroupIngress\n- ec2:ModifyNetworkInterfaceAttribute\n- sns:Publish\n\n### Deploy\nThe Cloudformation template references the local source directory and can be deployed using aws cloudformation package command.\n\nExample commands to package and deploy the template:\n\n```\naws cloudformation package \\\n --template-file stack.yaml \\\n --s3-bucket my-lambda-code-bucket \\\n --output-template-file deploy.yaml\n\naws cloudformation deploy \\\n --template-file deploy.yaml \\\n --stack-name guardduty-defender \\\n --parameter-overrides \\\n Role=arn:aws:iam::xxxxxxxxxxxx:role/lambda_execution_role\n```\n\nTo simplify deployment you can use deploy.sh, a wrapper script around aws cloudformation cli tool.\n"
},
{
"alpha_fraction": 0.625676691532135,
"alphanum_fraction": 0.6403712034225464,
"avg_line_length": 23.865385055541992,
"blob_id": "b0dcbcfc9bda902b42d1c643c811a6a5b207163e",
"content_id": "cfae1f19210dee48e0817e3c037f5657ffb86bfa",
"detected_licenses": [
"Apache-2.0"
],
"is_generated": false,
"is_vendor": false,
"language": "Shell",
"length_bytes": 1293,
"license_type": "permissive",
"max_line_length": 92,
"num_lines": 52,
"path": "/GuardDuty/Defender/cfn/deploy.sh",
"repo_name": "worldofdub/aws-support-tools",
"src_encoding": "UTF-8",
"text": "#!/bin/bash\n\n# Created by: David Pigliavento\n\n# Copyright 2017 Amazon.com, Inc. or its affiliates. All Rights Reserved.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\"). You may not use this file\n# except in compliance with the License. A copy of the License is located at\n#\n# http://aws.amazon.com/apache2.0/\n#\n# or in the \"license\" file accompanying this file. This file is distributed on an \"AS IS\"\n# BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the\n# License for the specific language governing permissions and limitations under the License.\n\n# Wrapper script to simplify deployment\n\nUSAGE=\"Usage: $0 [-role role_arn] [-bucket s3_bucket_name]\"\n\ncheck_input()\n if [ -z ${ROLE} ] || [ -z ${S3_BUCKET} ]; then\n echo \"$USAGE\"\n exit 1\n fi\n\nwhile (( \"$#\" )); do\n\n if [[ ${1} == \"-role\" ]]; then\n \tshift\n ROLE=${1}\n fi\n\n if [[ ${1} == \"-bucket\" ]]; then\n \tshift\n S3_BUCKET=\"${1}\"\n fi\n\n shift\ndone\n\ncheck_input\n\naws cloudformation package \\\n --template-file stack.yaml \\\n --s3-bucket ${S3_BUCKET} \\\n --output-template-file deploy.yaml\n\naws cloudformation deploy \\\n --template-file deploy.yaml \\\n --stack-name guardduty-defender \\\n --parameter-overrides \\\n Role=$ROLE\n"
},
{
"alpha_fraction": 0.807692289352417,
"alphanum_fraction": 0.807692289352417,
"avg_line_length": 25,
"blob_id": "d09be52e033df4dd90a09b343450496880d12b59",
"content_id": "1affc47912df68e67a32947cd56bba5eb0373ad9",
"detected_licenses": [
"Apache-2.0"
],
"is_generated": false,
"is_vendor": false,
"language": "Markdown",
"length_bytes": 26,
"license_type": "permissive",
"max_line_length": 25,
"num_lines": 1,
"path": "/GuardDuty/README.md",
"repo_name": "worldofdub/aws-support-tools",
"src_encoding": "UTF-8",
"text": "# GuardDuty Support Tools\n"
},
{
"alpha_fraction": 0.7097046375274658,
"alphanum_fraction": 0.7198312282562256,
"avg_line_length": 32.85714340209961,
"blob_id": "c5e036dcc4baaed742fe470d378e0ad3a269f645",
"content_id": "98f18e1ea119fb08dd33fc9cd5d8523b46ac9674",
"detected_licenses": [
"Apache-2.0"
],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 1185,
"license_type": "permissive",
"max_line_length": 92,
"num_lines": 35,
"path": "/GuardDuty/Defender/cfn/src/sns.py",
"repo_name": "worldofdub/aws-support-tools",
"src_encoding": "UTF-8",
"text": "\"\"\"Created by: David Pigliavento\"\"\"\n\n# Copyright 2017 Amazon.com, Inc. or its affiliates. All Rights Reserved.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\"). You may not use this file\n# except in compliance with the License. A copy of the License is located at\n#\n# http://aws.amazon.com/apache2.0/\n#\n# or in the \"license\" file accompanying this file. This file is distributed on an \"AS IS\"\n# BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the\n# License for the specific language governing permissions and limitations under the License.\n\nimport boto3\nimport os\n\n# SNS topic used to send notification\nSNS_TOPIC = os.getenv(\"SNS_TOPIC\")\n# AWS_DEFAULT_REGION is available during lambda execution to get current region\nAWS_DEFAULT_REGION = os.getenv(\"AWS_DEFAULT_REGION\")\n\nclient = boto3.client('sns', region_name=AWS_DEFAULT_REGION)\n\n\ndef publish_sns_email_message(message_body, severity):\n\n if severity >= 8.0:\n severity = 'High'\n\n response = client.publish(\n TopicArn=SNS_TOPIC,\n Message=message_body,\n Subject='GuardDuty Finding: {} Severity'.format(severity),\n MessageStructure='string'\n )\n"
},
{
"alpha_fraction": 0.7120925784111023,
"alphanum_fraction": 0.7309872508049011,
"avg_line_length": 46.58427047729492,
"blob_id": "625b4f12ad4354c72346d37b255dbe3587ee4bc0",
"content_id": "4999a7012afd24a001afa6a13ffab1976e6c617a",
"detected_licenses": [
"Apache-2.0"
],
"is_generated": false,
"is_vendor": false,
"language": "Markdown",
"length_bytes": 4234,
"license_type": "permissive",
"max_line_length": 383,
"num_lines": 89,
"path": "/Cognito/decode-verify-jwt/README.md",
"repo_name": "worldofdub/aws-support-tools",
"src_encoding": "UTF-8",
"text": "# Decode and verify Amazon Cognito JWT tokens\n\n## Short Description\n\nIn many cases, when using a Cognito User Pool for authentication, it would be nice to have the details of the logged in user available in our back-end application. Some examples are:\n\n- API Gateway using a User Pool for authorisation; but after that the backend integration is not aware of the details of the user who invoked the API.\n- A Cognito Identity Pool is used to retrieve STS temporary credentials and then Lambda is invoked; but Lambda has no knowledge of the identity of the user that originally authenticated against the User Pool.\n\nIn all those cases it would be necessary to pass the user details in the payload to the the backend, but how can we ensure that those details don't get spoofed?\n\n## Resolution\n\nLuckily the JSON Web Tokens (JWT) come to help us. Upon login, Cognito User Pool returns a base64-encoded JSON string called JWT that contains important information (called claims) about the user. It actually returns 3 tokens called ID, Access and Refresh token, each one with its own purpose; however the token containing all the user fields defined in the User Pool, is the ID one.\n\nEvery JWT token is composed of 3 sections: header, payload and signature. Let's have a look at the content of a sample ID Token:\n\n```json\n{\n \"kid\": \"abcdefghijklmnopqrstuvwxyz=\",\n \"alg\": \"RS256\"\n}\n```\nThe header contains the algorithm used to sign the token (in our case is RS256 which means RSA signature with SHA-256) and a Key ID (kid) that we'll need later on.\n\n```json\n{\n \"sub\": \"aaaaaaaa-bbbb-cccc-dddd-eeeeeeeeeeee\",\n \"aud\": \"xxxxxxxxxxxxxxxxxxx\",\n \"email_verified\": true,\n \"token_use\": \"id\",\n \"auth_time\": 1500009400,\n \"iss\": \"https://cognito-idp.ap-southeast-2.amazonaws.com/ap-southeast-2_XXXxxXXxX\",\n \"cognito:username\": \"emanuele\",\n \"exp\": 1500013000,\n \"given_name\": \"Emanuele\",\n \"iat\": 1500009400,\n \"email\": \"[email protected]\"\n}\n```\nThe payload contains information about the user as well as token creation and expiration dates.\n\nThe third section is the signature of a hashed combination of the header and the payload. In particular Cognito generates two pairs of RSA keys for each User Pool, then uses one of the private keys to sign the token and makes the corresponding public key available at the address\n\n```\nhttps://cognito-idp.{region}.amazonaws.com/{userPoolId}/.well-known/jwks.json\n```\n\nSuch JSON file looks like this\n\n```json\n{\n \"keys\": [{\n \"alg\": \"RS256\",\n \"e\": \"AQAB\",\n \"kid\": \"abcdefghijklmnopqrstuvwxyz=\",\n \"kty\": \"RSA\",\n \"n\": \"lsjhglskjhgslkjgh43lj5h34lkjh34lkjht34ljth3l\",\n \"use\": \"sig\"\n }, {\n \"alg\": \"RS256\",\n \"e\": \"AQAB\",\n \"kid\": \"fgjhlkhjlkhj5jkl5h=\",\n \"kty\": \"RSA\",\n \"n\": \"sgjhlk6jp98ugp98up34hpoi65hgh\",\n \"use\": \"sig\"\n }]\n}\n```\n\nAll we need to do is to search for the key with a kid matching the kid in our JWT token and then use some libraries to decode the token and verify its signature. The good news is that we can pass the whole token in the payload to the back-end application and rest assured that it cannot be tampered with.\n\nThis solution is applicable to virtually any applications that want to verify the identity of a Cognito user from the JWT token, but since a common requirement is to do it from AWS Lambda, I wrote some sample Lambda code in Python 2.7 and NodeJS 4.3.\n\n## Requirements\n\n### Python 2.7\nFor the Python version I've used <a href=\"https://github.com/mpdavis/python-jose\">python-jose</a>, to handle the JWT token decoding and signature verification; that library is already part of the Lambda environment, so no additional steps are required.\nHowever, if ran locally, it will be necessary to install it. As an example, can be installed via \"pip\" with\n```\npip install python-jose\n```\n\n### NodeJS 4.3\nFor the NodeJS version I've used <a href=\"https://www.npmjs.com/package/node-jose\">node-jose</a> which needs to be included in the Lambda deployment package. It can be done via NPM with\n```\nnpm install node-jose\n```\nfrom within the script directory. Please refer to the AWS <a href=\"http://docs.aws.amazon.com/lambda/latest/dg/nodejs-create-deployment-pkg.html\">documentation</a> for more details."
},
{
"alpha_fraction": 0.650175154209137,
"alphanum_fraction": 0.6606830358505249,
"avg_line_length": 39.07017517089844,
"blob_id": "8533394d3ce39b01918f93f187ac95b0c59c8044",
"content_id": "4de8f43af317f44ad444250c9c7ea24590fee27f",
"detected_licenses": [
"Apache-2.0"
],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 2284,
"license_type": "permissive",
"max_line_length": 119,
"num_lines": 57,
"path": "/GuardDuty/Defender/cfn/src/defender.py",
"repo_name": "worldofdub/aws-support-tools",
"src_encoding": "UTF-8",
"text": "\"\"\"Created by: David Pigliavento\"\"\"\n\n# Copyright 2017 Amazon.com, Inc. or its affiliates. All Rights Reserved.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\"). You may not use this file\n# except in compliance with the License. A copy of the License is located at\n#\n# http://aws.amazon.com/apache2.0/\n#\n# or in the \"license\" file accompanying this file. This file is distributed on an \"AS IS\"\n# BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the\n# License for the specific language governing permissions and limitations under the License.\n\nimport boto3\nimport sys\nimport sg\nimport ec2\nimport sns\n\n\ndef lambda_handler(event, context):\n gd_severity = event['detail']['severity']\n gd_type = event['detail']['type']\n gd_resource = event['detail']['resource']\n gd_resource_type = gd_resource['resourceType']\n gd_resource_role = event['detail']['service']['resourceRole']\n\n print('gd_severity = {}'.format(gd_severity))\n print('gd_type = {}'.format(gd_type))\n print('gd_resource = {}'.format(gd_resource))\n print('gd_resource_role = {}'.format(gd_resource_role))\n\n if gd_resource_type == 'Instance' and gd_resource_role == 'ACTOR':\n instance_id = gd_resource['instanceDetails']['instanceId']\n print('instance_id = {}'.format(instance_id))\n\n if instance_id == 'i-99999999':\n print('No action for sample finding')\n sys.exit(0)\n\n if gd_severity >= 8.0:\n # Get instance vpc_id\n vpc_id = ec2.get_instance_vpc(instance_id)\n\n # Get lock down security group for vpc_id\n # If lock down security group does not exist it will be created\n # Lock down group has no ingress or egress rules\n lock_down_security_group = sg.get_lockdown_security_group(vpc_id)\n\n # Change security group for all instance interfaces\n ec2.lock_down_ec2_instance(instance_id, lock_down_security_group)\n\n print('instance {} was locked down with security group {}'.format(instance_id, lock_down_security_group))\n\n message_body = 'instance {} was locked down based on guardduty finding: \\n\\n {}'.format(instance_id, event)\n\n sns.publish_sns_email_message(message_body, gd_severity)\n"
}
] | 8 |
LionStudent/wolf
|
https://github.com/LionStudent/wolf
|
ab74a1506411ef805b5ec72400171ea3be03d1d7
|
086ab85d458c234ae7219b66729f100f01f41c2d
|
d834f5bf4af01421cbf440724a1da86a8c89e838
|
refs/heads/master
| 2022-09-05T17:46:00.748184 | 2020-05-30T03:22:55 | 2020-05-30T03:22:55 | 263,093,009 | 0 | 0 | null | null | null | null | null |
[
{
"alpha_fraction": 0.5092478394508362,
"alphanum_fraction": 0.5351418256759644,
"avg_line_length": 12.762711524963379,
"blob_id": "5341a16e85f597e1298dd843db84a566009a2cea",
"content_id": "04d4d4e3e951cf8453f2f4046219073514647161",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 811,
"license_type": "no_license",
"max_line_length": 97,
"num_lines": 59,
"path": "/store.py",
"repo_name": "LionStudent/wolf",
"src_encoding": "UTF-8",
"text": "import mysql.connector\n\n \n\ndef connect():\n\n db = mysql.connector.connect(\n\n host='sql3.freemysqlhosting.net',\n\n port=3306,\n\n database='sql3337629',\n\n user='sql3337629',\n\n password='IGseDDutut'\n\n )\n\n cursor=db.cursor()\n\n return db, cursor\n\n\ndef searchTextData(name):\n\n db, cursor = connect()\n\n\n query = 'select isTotal,isRaw,metric,fromDate,toDate,url from covid19 where name=\"%s\"' % name\n\n cursor.execute(query)\n\n retval=[]\n\n for isTotal,isRaw,metric,fromDate,toDate,url in cursor:\n\n retval.append({\n\n 'isTotal': isTotal,\n\n 'isRaw': isRaw,\n\n 'metric': metric,\n\n 'from': fromDate,\n\n 'to': toDate,\n\n 'url': url\n\n })\n\n \n\n db.close()\n\n return retval"
},
{
"alpha_fraction": 0.9038461446762085,
"alphanum_fraction": 0.9038461446762085,
"avg_line_length": 9.600000381469727,
"blob_id": "e1ca949111018d9138f32a7447df6a0d14c216b5",
"content_id": "50cab9edb2ac08f656648d18f5a435a7af7b6b89",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Text",
"length_bytes": 52,
"license_type": "no_license",
"max_line_length": 15,
"num_lines": 5,
"path": "/requirements.txt",
"repo_name": "LionStudent/wolf",
"src_encoding": "UTF-8",
"text": "matplotlib\nflask\nmysql-connector\nrequests\ncloudinary"
}
] | 2 |
nanomesher/Nanomesher_NanoSound_moode
|
https://github.com/nanomesher/Nanomesher_NanoSound_moode
|
5911f9ee5559a360729d6f3e558f1d047f0cbe81
|
9c34a2f12b076126b3ab6df77964ed2df1eccafd
|
654d7603fd90407d3f183c310e74c55828b596c2
|
refs/heads/master
| 2023-06-12T23:26:05.972070 | 2020-08-04T03:45:57 | 2020-08-04T03:45:57 | 272,746,195 | 0 | 1 | null | null | null | null | null |
[
{
"alpha_fraction": 0.7916666865348816,
"alphanum_fraction": 0.7916666865348816,
"avg_line_length": 30.66666603088379,
"blob_id": "fca7f4898214db84d392471ed4c685e7f561d0ef",
"content_id": "a745796b69be0e80c9159a5fdcdb7c04a9a36779",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Shell",
"length_bytes": 96,
"license_type": "no_license",
"max_line_length": 54,
"num_lines": 3,
"path": "/nanosound_moode/nanodac_lirc",
"repo_name": "nanomesher/Nanomesher_NanoSound_moode",
"src_encoding": "UTF-8",
"text": "#!/bin/bash\ncd /home/pi/Nanomesher_NanoSound_moode/nanosound_moode\nsudo python nanodac_lirc.py\n\n"
},
{
"alpha_fraction": 0.5602467060089111,
"alphanum_fraction": 0.5717899799346924,
"avg_line_length": 24.095237731933594,
"blob_id": "782f15b7c375830e4170463a8ae7b6492a0668a8",
"content_id": "47d4056ca7f45284af9716f89fca1dde924724fc",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 6324,
"license_type": "no_license",
"max_line_length": 107,
"num_lines": 252,
"path": "/nanosound_moode/nanodac_lirc.py",
"repo_name": "nanomesher/Nanomesher_NanoSound_moode",
"src_encoding": "UTF-8",
"text": "#! /usr/bin/python\n# -*- coding: utf-8 -*-\n\nfrom __future__ import unicode_literals\n\nPYTHONIOENCODING = \"UTF-8\"\n#from socketIO_client import SocketIO, LoggingNamespace\nfrom time import time\nfrom mpd import MPDClient\nimport RPi.GPIO as GPIO\n#import urllib2\nimport json\nimport ConfigParser\nimport os\nimport os.path\n\n\ndef mute():\n os.system('/var/www/vol.sh mute')\n\n\ndef unmute():\n os.system('/var/www/vol.sh mute')\n\n\ndef unrandom():\n os.system('mpc random off')\n\n\ndef randomset():\n os.system('mpc random on')\n\n\ndef repeat():\n os.system('mpc repeat on')\n\n\ndef unrepeat():\n os.system('mpc repeat off')\n\n\ndef toggle():\n os.system('mpc toggle')\n\n\ndef songprev():\n os.system('mpc prev')\n\n\ndef songnext():\n os.system('mpc next')\n\n\ndef volup():\n os.system('/var/www/vol.sh up 5')\n\n\ndef voldown():\n os.system('/var/www/vol.sh dn 5')\n\n\ndef stop():\n os.system('mpc stop')\n\n\n# listPlayList()\n\ndef setup():\n GPIO.setmode(GPIO.BOARD) # Numbers GPIOs by physical location\n GPIO.setup(11, GPIO.IN, pull_up_down=GPIO.PUD_DOWN)\n\n\ndef binary_aquire(pin, duration):\n # aquires data as quickly as possible\n t0 = time()\n results = []\n while (time() - t0) < duration:\n results.append(GPIO.input(pin))\n return results\n\n\ndef on_ir_receive(pinNo, bouncetime=150):\n # when edge detect is called (which requires less CPU than constant\n # data acquisition), we acquire data as quickly as possible\n data = binary_aquire(pinNo, bouncetime / 1000.0)\n if len(data) < bouncetime:\n return\n rate = len(data) / (bouncetime / 1000.0)\n pulses = []\n i_break = 0\n # detect run lengths using the acquisition rate to turn the times in to microseconds\n for i in range(1, len(data)):\n if (data[i] != data[i - 1]) or (i == len(data) - 1):\n pulses.append((data[i - 1], int((i - i_break) / rate * 1e6)))\n i_break = i\n # decode ( < 1 ms \"1\" pulse is a 1, > 1 ms \"1\" pulse is a 1, longer than 2 ms pulse is something else)\n # does not decode channel, which may be a piece of the information after the long 1 pulse in the middle\n outbin = \"\"\n for val, us in pulses:\n if val != 1:\n continue\n if outbin and us > 2000:\n break\n elif us < 1000:\n outbin += \"0\"\n elif 1000 < us < 2000:\n outbin += \"1\"\n try:\n return int(outbin, 2)\n except ValueError:\n # probably an empty code\n return None\n\n\ndef destroy():\n GPIO.cleanup()\n\ndef RefreshStat():\n global muted, randomed, repeated\n client = MPDClient() \n client.connect(\"localhost\", 6600)\n\n volstatus = client.status()\n\n if ('repeat' in volstatus):\n if (volstatus['repeat']==\"1\"):\n repeated = True\n else:\n repeated = False\n else:\n repeated = False\n\n if ('random' in volstatus):\n if (volstatus['random']==\"1\"):\n randomed = True\n else:\n randomed = False\n else:\n randomed = False\n\n if ('volume' in volstatus):\n if (volstatus['volume']==\"0\"):\n muted = True\n else:\n muted = False\n else:\n muted = False\n\n client.close()\n \n\nmuted = False\nrandomed = False\nrepeated = False # 0-no repeat , 1-repeat one, 2-repeat\n\n\nconfig = ConfigParser.ConfigParser()\n\nif(os.path.isfile('/home/pi/nanosound_keys.ini')):\n config.read('/home/pi/nanosound_keys.ini')\nelse:\n config.read('/home/pi/Nanomesher_NanoSound_moode/nanosound_moode/conf/nanosound_keys.ini')\n\nMUTE_BUTTON = config.get('Default','MUTE_BUTTON')\nPREV_BUTTON = config.get('Default','PREV_BUTTON')\nNEXT_BUTTON = config.get('Default','NEXT_BUTTON')\nTOGGLE_BUTTON = config.get('Default','TOGGLE_BUTTON')\nVOLUP_BUTTON = config.get('Default','VOLUP_BUTTON')\nVOLDOWN_BUTTON = config.get('Default','VOLDOWN_BUTTON')\nNEXTPLAYLIST_BUTTON = config.get('Default','NEXTPLAYLIST_BUTTON')\nPREVPLAYLIST_BUTTON = config.get('Default','PREVPLAYLIST_BUTTON')\nRANDOM_BUTTON = config.get('Default','RANDOM_BUTTON')\nREPEAT_BUTTON = config.get('Default','REPEAT_BUTTON')\nSTOP_BUTTON = config.get('Default','STOP_BUTTON')\n\n\n\nRefreshStat()\n\nif __name__ == \"__main__\":\n setup()\n\n print(\"Starting IR Listener\")\n while True:\n\n #print(\"Waiting for signal\")\n GPIO.wait_for_edge(11, GPIO.FALLING)\n code = on_ir_receive(11)\n if code:\n hexcode = str(hex(code))\n RefreshStat()\n if (hexcode == MUTE_BUTTON) and (not muted):\n muted = True\n mute()\n elif (hexcode == MUTE_BUTTON) and (muted):\n muted = False\n unmute()\n elif (hexcode == PREV_BUTTON):\n songprev()\n elif (hexcode == NEXT_BUTTON):\n songnext()\n elif (hexcode == TOGGLE_BUTTON):\n toggle()\n elif (hexcode == VOLUP_BUTTON):\n volup()\n elif (hexcode == VOLDOWN_BUTTON):\n voldown()\n elif (not randomed) and (hexcode == RANDOM_BUTTON):\n randomed = True\n randomset()\n elif (randomed) and (hexcode == RANDOM_BUTTON):\n randomed = False\n unrandom()\n elif (not repeated) and (hexcode == REPEAT_BUTTON):\n repeated = True\n repeat()\n elif (repeated) and (hexcode == REPEAT_BUTTON):\n repeated = False\n unrepeat()\n elif (hexcode == STOP_BUTTON):\n stop()\n\n# destroy()\n\n# while(True):\n\n# button = lirc.nextcode()\n# if(len(button)>0):\n# command=button[0]\n\n# if(command==\"listup\"):\n# \t\tplayNextPlaylist()\n# elif(command==\"listdown\"):\n# \t\tplayPrevPlaylist()\t\t\t\t\t\n# elif(not muted) and (command==\"mute\"):\n# muted=True\n# mute()\n# elif(muted) and (command==\"mute\"):\n# muted=False\n# unmute()\n# elif(not randomed) and (command==\"random\"):\n# randomed=True\n# randomset()\n# elif(randomed) and (command==\"random\"):\n# randomed=False\n# unrandom()\n# elif(not repeated) and (command==\"repeat\"):\n# repeated=True\n# repeat()\n# elif(repeated) and (command==\"repeat\"):\n# repeated=False\n# unrepeat()\n"
},
{
"alpha_fraction": 0.7971698045730591,
"alphanum_fraction": 0.801886796951294,
"avg_line_length": 25.5,
"blob_id": "b5ef43299889c9a837605d4b3c91ff03f26aa409",
"content_id": "b29237c29e6c36f75c6c00b9c52a6dade934aa49",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Shell",
"length_bytes": 212,
"license_type": "no_license",
"max_line_length": 51,
"num_lines": 8,
"path": "/install.sh",
"repo_name": "nanomesher/Nanomesher_NanoSound_moode",
"src_encoding": "UTF-8",
"text": "#!/bin/bash\n\nsudo pip install python-mpd2\n\nsudo cp nanosound_lirc.service /lib/systemd/system/\nsudo /bin/systemctl daemon-reload\nsudo /bin/systemctl enable nanosound_lirc\nsudo /bin/systemctl start nanosound_lirc\n"
}
] | 3 |
chipsandtea/CMPS142
|
https://github.com/chipsandtea/CMPS142
|
947103f557f07c78781a80a7bed259e25e62439c
|
0bdaa7c9afd983a392c17d72dd881fa704539427
|
4b07555cfb21c560f111cf390df860f5720fc4eb
|
refs/heads/master
| 2021-01-19T21:34:13.123099 | 2017-06-19T23:27:30 | 2017-06-19T23:27:30 | 88,668,366 | 0 | 0 | null | null | null | null | null |
[
{
"alpha_fraction": 0.5356289148330688,
"alphanum_fraction": 0.6049736738204956,
"avg_line_length": 27.657533645629883,
"blob_id": "f3a2cfcb8b4f3c1521ab94eab98c773cbfdd1468",
"content_id": "3ede0626f1e2802eb348737642760de1c3ae3940",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 2091,
"license_type": "no_license",
"max_line_length": 85,
"num_lines": 73,
"path": "/HW1/runme.py",
"repo_name": "chipsandtea/CMPS142",
"src_encoding": "UTF-8",
"text": "from prob3 import generate, closed_form, gradientDescent\nimport numpy as np\n\nprint('=======CLOSED FORM=======')\nprint('=====OUTPUT ~ [2, -1]=====')\ntrain_set, values = generate((-1, 1), 10000, 0.1)\ntest_set, vals = generate((-1, 1), 10000, 0.1)\nk = [10, 100, 1000, 10000]\nprint(values.shape)\nprint('TRAIN SET')\nfor i in k:\n\tk_val = i\n\tval = closed_form(train_set, values, i)\n\tprint('kval: \\t' + str(k_val) + '\\t val:' + str(val))\n\tprint('t1 error: ' + str(2-val[0,0]) + ' t2 error:' + str(-1-val[0,1]))\nprint('TEST SET')\nfor i in k:\n\tk_val = i\n\tval = closed_form(test_set, vals, i)\n\tprint('kval: \\t' + str(k_val) + '\\t val:' + str(val))\n\tprint('t1 error: ' + str(2-val[0,0]) + ' t2 error:' + str(-1-val[0,1]))\n#print(closed_form(train_set, values, 500))\nprint('=====GRADIENT DESCENT=====')\nX, y = generate((-1, 1), 10000, 0.01)\n\n#theta_closed = closed_form(X, y, 1000)\ntheta = np.array([0, 0])\n\n#print(X.shape)\ny = np.asmatrix(y)\ny = y.T\n#print(y.shape)\n#print(theta.shape)\n\nval = closed_form(X, y, 10000)\nprint('kval: \\t' + str(k_val) + '\\t val:' + str(val))\nw, ctr= gradientDescent(X, y, theta, 1, 0.01, 20)\nprint(1)\nprint(w)\nprint(ctr)\nprint('ERROR')\nprint(w-val)\nw, ctr= gradientDescent(X, y, theta, 0.1, 0.01, 20)\nprint(0.1)\nprint(w)\nprint(ctr)\nprint('ERROR')\nprint(w-val)\nw, ctr= gradientDescent(X, y, theta, 0.01, 0.01, 20)\nprint(0.01)\nprint(w)\nprint(ctr)\nprint('ERROR')\nprint(w-val)\nw, ctr= gradientDescent(X, y, theta, 0.001, 0.01, 20)\nprint(0.001)\nprint(w)\nprint(ctr)\nprint('ERROR')\nprint(w-val)\n\n\n'''\nJ_closed = mean_squared_error(X, y, theta_closed)\ntheta_1, T1 = batch_gradient_descent(X, y, 1)\ntheta_2, T2 = batch_gradient_descent(X, y, 0.1)\ntheta_3, T3 = batch_gradient_descent(X, y, 0.01)\ntheta_4, T4 = batch_gradient_descent(X, y, 0.001)\n\nprint('1: ' + (str(mean_squared_error(X, y, theta_1) - J_closed)) + ' ' + str(T1))\nprint('2: ' + (str(mean_squared_error(X, y, theta_2) - J_closed)) + ' ' + str(T2))\nprint('3: ' + (str(mean_squared_error(X, y, theta_3) - J_closed)) + ' ' + str(T3))\nprint('4: ' + (str(mean_squared_error(X, y, theta_4) - J_closed)) + ' ' + str(T4))'''"
},
{
"alpha_fraction": 0.6404308080673218,
"alphanum_fraction": 0.6545153260231018,
"avg_line_length": 21.370370864868164,
"blob_id": "f90f40a05b7bcf3d5ff154a4b1f4f3e12c961aa0",
"content_id": "a892be3f0a74291954e9d885903a388439c88138",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 1207,
"license_type": "no_license",
"max_line_length": 68,
"num_lines": 54,
"path": "/HW2/add10.py",
"repo_name": "chipsandtea/CMPS142",
"src_encoding": "UTF-8",
"text": "# credit to jaredjxyz\n\nimport arff\nimport random\n\nattribute = 'preg'\nfilename = 'diabetes.arff'\n\ndef main():\n\tpartg()\n\tparth()\n\ndef partg():\n\tdata = arff.load(open(filename))\n\n\t# find the attribute index\n\tfor attribute_index in range(len(data['attributes'])):\n\t\tif data['attributes'][attribute_index][0] == attribute:\n\t\t\tbreak\n\n\tif attribute_index == len(data['attributes']):\n\t\tprint(\"The given attribute name does not exist.\")\n\t\texit(1)\n\n\tattribute_type = data['attributes'][attribute_index][1]\n\n\t# Add attribute 10 times\n\tfor i in range(10):\n\t\t# Add a new attribute\n\t\tdata['attributes'].insert(0, (attribute + str(i), attribute_type))\n\n\t\t# Append a copy to each piece of data\n\t\tfor j in range(len(data['data'])):\n\t\t\tdata['data'][j].insert(0, data['data'][j][attribute_index])\n\n\t# Write a new file\n\tp4g_file = open('partg-' + filename, 'w')\n\tp4g_file.write(arff.dumps(data))\n\ndef parth():\n\tdata = arff.load(open(filename))\n\n\tfor i in range(20):\n\t\tdata['attributes'].insert(0, ('RANDOM' + str(i), 'NUMERIC'))\n\t\tfor j in range(len(data['data'])):\n\t\t\tdata['data'][j].insert(0, random.random())\n\n\tp4h_file = open('parth-' + filename, 'w')\n\tp4h_file.write(arff.dumps(data))\n\n\n\nif __name__ == '__main__':\n\tmain()"
},
{
"alpha_fraction": 0.5372124314308167,
"alphanum_fraction": 0.5872800946235657,
"avg_line_length": 22.79032325744629,
"blob_id": "f90e04ecefcbbe0c207a8413decf74594eaacd5b",
"content_id": "78be67b1cca0472a35245439a8b4f3e9af4bbcc1",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 1478,
"license_type": "no_license",
"max_line_length": 78,
"num_lines": 62,
"path": "/HW1/prob3.py",
"repo_name": "chipsandtea/CMPS142",
"src_encoding": "UTF-8",
"text": "import numpy as np\n\n#arr = np.zeros(shape=(1,10000))\n\n#train_set = np.random.uniform(-1,1,10000)\n#test_set = np.random.uniform(-1,1,10000)\n\n#for i in range(0, 10):\n #arr[0][i] = ((s1[i], s2[i]), 2*s1[i] - s2[i] + np.random.normal(0, 0.01))\n\n#epsilon = np.random.normal(0, variance, m)\n#thetaJ = (0, 0)\n\n\n# theta = (xt * x)^-1 * xty\ndef generate(interval, m, deviation):\n\txtrain_1 = np.random.uniform(interval[0], interval[1], m)\n\txtrain_2 = np.random.uniform(interval[0], interval[1], m)\n\txtrain = []\n\tytrain = np.zeros(m)\n\n\tfor i in range(m):\n\t\txtrain.append([xtrain_1[i], xtrain_2[i]])\n\t\tytrain[i] = 2*xtrain_1[i] - xtrain_2[i] + np.random.normal(0, 0.01)\n\treturn np.asmatrix(xtrain), ytrain\n\ndef closed_form(X, y, k):\n\tif k > len(X):\n\t\tprint('k too large')\n\t#print(X[:k])\n\tX = X[:k]\n\ty = y[:k]\n\t#print(len(X))\n\txtrans_x = np.matmul(X.T, X)\n\txtrans_xinv = xtrans_x.I\n\txtrans_xinv_xtrans = np.matmul(xtrans_xinv, X.T)\n\tclosed = np.matmul(xtrans_xinv_xtrans, y)\n\treturn closed\n\n# GRADIENT DESCENT\n\n\ndef gradientDescent(x, y, theta, alpha, m, numIterations):\n N = len(x)\n w = np.zeros((x.shape[1], 1))\n eta = alpha\n old = np.zeros(shape=(1000, 1))\n old = np.asmatrix(old)\n count = 0\n while True:\n \terror = x*w - y\n \tif (2-w[0]) < 0.001:\n \t\t#print(2-w[0])\n \t\tbreak\n \tif eta > 0.001:\n \t\teta /= 2\n \tgradient = x.T * error / N\n \tw = w - eta * gradient\n \tcount += 1\n \t#print(count)\n return w, count\n#def gradient_descent():\n\n\n\n"
},
{
"alpha_fraction": 0.59375,
"alphanum_fraction": 0.59375,
"avg_line_length": 31,
"blob_id": "ef815d34d6ad1fea8f1931aafa0e4ae506cf5632",
"content_id": "5355738e8730876ef242cdd0b5283b1ebb763c8d",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 64,
"license_type": "no_license",
"max_line_length": 39,
"num_lines": 2,
"path": "/HW1/closedform.py",
"repo_name": "chipsandtea/CMPS142",
"src_encoding": "UTF-8",
"text": "def closed_form(m, k, X, y, deviation):\n\tprint((X.T*X).I*X.T*y)\n"
}
] | 4 |
yamharush/HermitePolynomialsUsingDividedDifferences.py
|
https://github.com/yamharush/HermitePolynomialsUsingDividedDifferences.py
|
79104fe959ed6bd67cff3fbeb59a9b2b086b844a
|
a6d7375005f2e316733cc5aa5338c0e177f0e889
|
a45cc0c17b7cee520964ad15db95c2ccc4933738
|
refs/heads/master
| 2023-06-07T12:39:42.877354 | 2021-07-05T20:19:12 | 2021-07-05T20:19:12 | 383,255,464 | 0 | 0 | null | null | null | null | null |
[
{
"alpha_fraction": 0.44359755516052246,
"alphanum_fraction": 0.5066056847572327,
"avg_line_length": 31.439559936523438,
"blob_id": "a309b6d0f657d4a3ffeb675f7ae304234fb8bd74",
"content_id": "de9075a607ca6d97730be2d426fd7b2e2d4c0722",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 5931,
"license_type": "no_license",
"max_line_length": 118,
"num_lines": 182,
"path": "/main.py",
"repo_name": "yamharush/HermitePolynomialsUsingDividedDifferences.py",
"src_encoding": "UTF-8",
"text": "import math\n\n\nclass bcolors:\n HEADER = '\\033[95m'\n OKBLUE = '\\033[94m'\n OKCYAN = '\\033[96m'\n OKGREEN = '\\033[92m'\n WARNING = '\\033[93m'\n FAIL = '\\033[91m'\n ENDC = '\\033[0m'\n BOLD = '\\033[1m'\n UNDERLINE = '\\033[4m'\n BLACK = '\\033[30m'\n RED = '\\033[31m'\n GREEN = '\\033[32m'\n YELLOW = '\\033[33m'\n BLUE = '\\033[34m'\n MAGENTA = '\\033[35m'\n CYAN = '\\033[36m'\n WHITE = '\\033[37m'\n RESET = '\\033[0m'\n\n\ndef printPoints(listOfPoints):\n \"\"\"This function print the list of points\n :param listOfPoints: list of points ( x , y )\n :return: no return value\n \"\"\"\n for i in range(len(listOfPoints)):\n point = \" ( \"\n for j in range(len(listOfPoints[i])):\n point += str(listOfPoints[i][j])\n if j < 1:\n point += \" , \"\n point += \" ) \"\n print(point)\n print(\" \")\n\n\ndef printDerivativeValues(listOfDerivativeVal):\n \"\"\"This function prints the list Of derivative values\n :param listOfDerivativeVal: list of ( x , y' )\n :return: no return value\n \"\"\"\n for i in range(len(listOfDerivativeVal)):\n point = \" f'( \"\n for j in range(len(listOfDerivativeVal[i])):\n point += str(listOfDerivativeVal[i][j])\n if j < 1:\n point += \" ) = \"\n # point += \" ) \"\n print(point)\n print(\" \")\n\n\ndef HermitePolynomialsUsingDividedDifferences(pointsList, derivedValues, value):\n \"\"\" This function calculates Hermite Polynomials using Divided Differences\n :param pointsList: list of points [x,y]\n :param derivedValues: the value of the points in the derivative\n :param value:specific x val\n :return: the value of y at the given point\n \"\"\"\n rows = len(pointsList) * 2 # multiply the number of points - size of z table\n ZTable = [[0]] * rows # build z table\n tempTable = [[0] * 2] * rows\n i, j = 0, 0\n while i != rows: # initialize z table\n ZTable[i] = [pointsList[j][1]]\n ZTable[i + 1] = [pointsList[j][1]]\n tempTable[i] = pointsList[j]\n tempTable[i + 1] = pointsList[j]\n i = i + 2\n j = j + 1\n temp = 0\n for i in range(0, (rows - 1)):\n if i % 2 == 0:\n ZTable[i].append(derivedValues[temp][1])\n temp += 1\n else:\n ZTable[i].append((ZTable[i + 1][0] - ZTable[i][0]) / (tempTable[i + 1][0] - tempTable[i][0]))\n\n i, n = 0, 0\n k = 2\n j = 1\n\n while n <= rows - 2:\n for i in range(rows - k):\n ZTable[i].append((ZTable[i + 1][j] - ZTable[i][j]) / (tempTable[i + k][0] - tempTable[i][0]))\n j += 1\n i = 0\n k += 1\n n += 1\n\n i, k, p = 2, 2, 2\n j = 0\n\n result = (pointsList[0][1]) + ((derivedValues[0][1]) * (value - pointsList[0][0])) # the solution\n for i in range(len(ZTable[0])): # calculate hermit polynomial\n mul = 1\n if i < len(ZTable[0]) - 2:\n for j in range(k):\n mul = mul * (value - tempTable[j][0])\n result += ZTable[0][p] * mul\n j = 0\n p += 1\n if i != (len(ZTable[0]) - 1):\n k += 1\n i = 1\n strOfDividing = \"\\t Z|\\t\"\n for _ in ZTable[0]:\n strOfDividing += str(i) + \"nd divided diff|\\t\"\n i = i + 1\n i, j = 0, 0\n print(\" \")\n print(bcolors.MAGENTA+strOfDividing+bcolors.ENDC)\n for row in ZTable:\n strOfRow = \"z\" + str(i) + \"=\" + str(tempTable[i][j]) + \"\\t\\t\"\n k = 0\n for _ in row:\n strOfRow += str(row[k]) + \"\\t\"\n k = k + 1\n print(strOfRow)\n i = i + 1\n if j % 2 == 0 and j > 0:\n j = j + 1\n print(\"\\n\")\n\n i, k, p = 2, 2, 2\n j = 0\n\n print(\"H{0}({1}) = \".format(rows - 1, value), end=\" \")\n print(\"{0} + {1} * {2} \".format(tempTable[0][1], derivedValues[0][1],\n (math.ceil((value - pointsList[0][0]) * 1000) / 1000)), end=\" \")\n\n for i in range(len(ZTable[0])): # hermite polynomial\n mul = 1\n if i < len(ZTable[0]) - 2:\n for j in range(k):\n mul = mul * (value - tempTable[j][0])\n print(\"+ {0} * {1} \".format(ZTable[0][p], math.ceil(mul * 100000000) / 100000000), end=\" \")\n j = 0\n p += 1\n if i != (len(ZTable[0]) - 1):\n k += 1\n\n print(bcolors.BOLD + bcolors.OKBLUE + \"\\n\\n-----> Final Result: H{0}({1}) = {2}\".format(rows - 1, value, result))\n return result\n\n\ndef main():\n print(\"----------------Hermite Polynomials using Divided Differences----------------\")\n print(\n bcolors.BOLD + bcolors.GREEN + \"\\nH₂ₙ₊₁(X) = f[z₀] + sigma from k = 1 to 2n+1 of f[z₀,...,zₖ](x-z₀)(x-z₁)···(\"\n \"x-zₖ₋₁) \\n\" + bcolors.ENDC)\n print(\"-----------------------------------------------------------------------------\")\n listOfPoints_x_y = [[1.3, 0.620086], [1.6, 0.4554022], [1.9, 0.2818186]]\n derivativeValues = [[1.3, -0.5220232], [1.6, -0.5698959], [1.9, -0.5811571]]\n print(bcolors.MAGENTA + \"The points ( x , y ): \")\n printPoints(listOfPoints_x_y)\n print(\"The points ( x , y' ) \")\n printDerivativeValues(derivativeValues)\n print(bcolors.ENDC + bcolors.BOLD + \"The table: \")\n HermitePolynomialsUsingDividedDifferences(listOfPoints_x_y, derivativeValues, 1.5)\n\n\nif __name__ == \"__main__\":\n main()\n\n\"\"\"\nb = [[1, 0], [2, 0.6931]]\nbDer = [[1, 1], [2, 0.5]]\nHermitePolynomialsUsingDividedDifferences(b, bDer, 1.5)\n\nlistOfPoints_x_y = [[1.3, 0.620086], [1.6, 0.4554022], [1.9, 0.2818186]]\nderivativeValues = [[1.3, -0.5220232], [1.6, -0.5698959], [1.9, -0.5811571]]\nHermitePolynomialsUsingDividedDifferences(listOfPoints_x_y, derivativeValues, 1.5)\n\nlistOfPoints_x_y = [[0, 1], [1, math.e]]\nderivativeValues = [[0, 1], [1, math.e]]\nHermitePolynomialsUsingDividedDifferences(listOfPoints_x_y, derivativeValues, 1.5)\n\"\"\"\n"
}
] | 1 |
chinnamanaidu/rutgers_bootcamp_final_project
|
https://github.com/chinnamanaidu/rutgers_bootcamp_final_project
|
9c01262d0cca08999a9e9fd89138d75eabea8da1
|
b9ee9e77ea0c1a5baaa9184b6402f13f02c5424b
|
b2ef12b3c0e9b84ee716e4b6e5f51bb3c24bbc24
|
refs/heads/main
| 2023-02-25T20:19:25.677390 | 2021-01-31T11:46:23 | 2021-01-31T11:46:23 | 330,218,195 | 0 | 1 | null | null | null | null | null |
[
{
"alpha_fraction": 0.6346396803855896,
"alphanum_fraction": 0.6371681690216064,
"avg_line_length": 32,
"blob_id": "f6bea28ab33fb111d26c2d3a532ad724b2c2681b",
"content_id": "3a19afaa390b4505d2df8f9312fcfc438d2d5ad0",
"detected_licenses": [
"MIT"
],
"is_generated": false,
"is_vendor": false,
"language": "PHP",
"length_bytes": 791,
"license_type": "permissive",
"max_line_length": 130,
"num_lines": 24,
"path": "/yahoo_autocomplete_ajax.php",
"repo_name": "chinnamanaidu/rutgers_bootcamp_final_project",
"src_encoding": "UTF-8",
"text": "header('Content-type:text/html; charset=UTF-8;');\n\n$action = (isset($_GET['action'])) ? $_GET['action'] : null;\n$symbol = (isset($_GET['symbol'])) ? $_GET['symbol'] : null;\n\nswitch($action) {\n case 'autocjson':\n getYahooSymbolAutoComplete($symbol);\n break;\n}\n\nfunction getYahooSymbolAutoCompleteJson($symbolChar) {\n $data = @file_get_contents(\"http://d.yimg.com/aq/autoc?callback=YAHOO.util.ScriptNodeDataSource.callbacks&query=$symbolChar\");\n\n // parse yahoo data into a list of symbols\n $result = [];\n $json = json_decode(substr($data, strlen('YAHOO.util.ScriptNodeDataSource.callbacks('), -1));\n\n foreach ($json->ResultSet->Result as $stock) {\n $result[] = '('.$stock->symbol.') '.$stock->name;\n }\n\n echo json_encode(['symbols' => $result]);\n}"
},
{
"alpha_fraction": 0.6335614323616028,
"alphanum_fraction": 0.6557350158691406,
"avg_line_length": 31.7917423248291,
"blob_id": "334634e80e931c71eafe38c801eb4e30ba0f4f2b",
"content_id": "b55a8b907cf5442116a03e89fe31f5ae416a5557",
"detected_licenses": [
"MIT"
],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 18265,
"license_type": "permissive",
"max_line_length": 189,
"num_lines": 557,
"path": "/app.py",
"repo_name": "chinnamanaidu/rutgers_bootcamp_final_project",
"src_encoding": "UTF-8",
"text": "from flask import Flask, render_template, redirect\nfrom flask_pymongo import PyMongo\nfrom sqlalchemy import create_engine\nfrom sqlalchemy.orm import Session\nfrom splinter import Browser\nfrom bs4 import BeautifulSoup as bs\nimport time\nimport csv\nimport datetime, time\nimport requests\nimport string\nimport pandas as pd\nfrom scipy import stats\nfrom sklearn.model_selection import train_test_split\nfrom sklearn.preprocessing import LabelEncoder, MinMaxScaler\nfrom tensorflow.keras.utils import to_categorical\nfrom sklearn.preprocessing import MinMaxScaler\nfrom sklearn.linear_model import LinearRegression\nfrom sklearn.preprocessing import StandardScaler\nimport matplotlib.pyplot as plt\nfrom sklearn.metrics import mean_squared_error\nfrom sklearn.linear_model import Lasso\nfrom sklearn.linear_model import Ridge\nfrom sklearn.linear_model import ElasticNet\nfrom sklearn.model_selection import train_test_split\nfrom sklearn.neighbors import KNeighborsClassifier\nimport os\nfrom pathlib import Path\n\nimport warnings\nwarnings.simplefilter('ignore', FutureWarning)\nfrom numpy.random import seed\nseed(1)\n#from sql_keys import username, password\n\n# Create an instance of Flask\napp = Flask(__name__)\n\n# Use PyMongo to establish Mongo connection\n#mongo = PyMongo(app, uri=\"mongodb://localhost:27017/web_scrapping_challenge_db\")\n\nrds_connection_string = \"postgres:admin@localhost:5432/final_project_stocks\"\n#rds_connection_string = \"postgres:admin@localhost:5432/final_project_stocks\"\n#<insert password>@localhost:5432/customer_db\"\nengine = create_engine(f'postgresql://{rds_connection_string}')\n\n#conn_url = 'postgres://slcslzlanikhqj:b106eda2173b6ce2c35f34626fb87eafcfd0f52e9ce55f36d776827fef375f71@ec2-52-3-4-232.compute-1.amazonaws.com:5432/ddv6vu8jpdbjns'\n#engine = create_engine(conn_url)\n\n\n#conn_url = 'postgres://wnhlndefflhtpu:d5f994af42137d89ab637af376441407d47cfbe163426ec823e3bf602599ee7c@ec2-54-163-215-125.compute-1.amazonaws.com:5432/dvfh8o0788t9q'\n#engine = create_engine(conn_url)\n\n#postgres://wnhlndefflhtpu:d5f994af42137d89ab637af376441407d47cfbe163426ec823e3bf602599ee7c@ec2-54-163-215-125.compute-1.amazonaws.com:5432/dvfh8o0788t9q\n\n\n# Route to render index.html template using data from Mongo\[email protected](\"/\")\ndef home():\n\n # Find one record of data from the mongo database\n # @TODO: YOUR CODE HERE!\n session = Session(engine)\n stocks = session.execute(\"select * from stocks \")\n #country = session.execute(\" select country, country_code from country \")\n #return render_template(\"index.html\", listings=listings)\n # Return template and data\n \n resdata = [{\n \n }\n ]\n\n responsedata = { 'respdata': resdata\n }\n session.close()\n return render_template(\"index.html\", stocks=stocks, responsedata=responsedata, \n init_page=\"initpage\")\n\n\n# Route to render index.html template using data from Mongo\[email protected](\"/kclass\")\ndef kclass():\n\n # Find one record of data from the mongo database\n # @TODO: YOUR CODE HERE!\n session = Session(engine)\n stocks = session.execute(\"select * from stocks \")\n #return render_template(\"index.html\", listings=listings)\n # Return template and data\n \n resdata = [{\n \n }\n ]\n\n responsedata = { 'respdata': resdata\n }\n session.close()\n return render_template(\"kclassification.html\", stocks=stocks, responsedata=responsedata, \n init_page=\"initpage\")\n\n#\n#@app.route(\"/api/v1.0/<startdt>/<enddt>\")\n#def startEndDate(startdt, enddt):\n\n# Route to render index.html template using data from Mongo\[email protected](\"/<st>\")\ndef get_stocks(st):\n\n\n # Find one record of data from the mongo database\n # @TODO: YOUR CODE HERE!\n \n session = Session(engine)\n stocks = session.execute(\"select * from stocks \")\n #return render_template(\"index.html\", listings=listings)\n # Return template and data\n\n \n resdata = [{\n \n }\n ]\n\n responsedata = { 'respdata': resdata\n }\n session.close()\n\n print('Hello this is test')\n df = pd.read_csv(\"static/data/\"+st+\".csv\")\n # Drop the null columns where all values are null\n df = df.dropna(axis='columns', how='all')\n # Drop the null rows\n # This is for the MinMax Linear Regression model\n print(df.head())\n df = df.dropna()\n print(df.head())\n y = df[\"Open\"].values.reshape(-1, 1)\n diff = df['Close']-df[\"Open\"]\n diff_locations = []\n for i in diff:\n if (i <0):\n diff_locations.append(0)\n else:\n diff_locations.append(1)\n df['diff'] = pd.DataFrame(diff_locations)\n #X = df[['High', 'Low', 'Close', 'Volume','diff']]\n X = df[['High', 'Low', 'Close', 'Volume','diff']]\n print(X)\n print(y)\n print(X.shape, y.shape)\n X_train, X_test, y_train, y_test = train_test_split(X, y, random_state=42)\n X_minmax = MinMaxScaler().fit(X_train)\n y_minmax = MinMaxScaler().fit(y_train)\n\n X_train_minmax = X_minmax.transform(X_train)\n X_test_minmax = X_minmax.transform(X_test)\n y_train_minmax = y_minmax.transform(y_train)\n y_test_minmax = y_minmax.transform(y_test)\n model2 = LinearRegression()\n model2.fit(X_train_minmax, y_train_minmax)\n print(f\"Testing Data Score: {model2.score(X_test_minmax, y_test_minmax)}\")\n minmax_predict=model2.score(X_test_minmax, y_test_minmax)\n print(minmax_predict)\n\n #This is standard scalar transformation\n X_scaler = StandardScaler().fit(X_train)\n y_scaler = StandardScaler().fit(y_train)\n X_train_scaled = X_scaler.transform(X_train)\n X_test_scaled = X_scaler.transform(X_test)\n y_train_scaled = y_scaler.transform(y_train)\n y_test_scaled = y_scaler.transform(y_test)\n model = LinearRegression()\n model.fit(X_train_scaled, y_train_scaled)\n predictions = model.predict(X_test_scaled)\n scallar_MSE = mean_squared_error(y_test_scaled, predictions)\n scallar_r2 = model.score(X_test_scaled, y_test_scaled)\n plt.scatter(model.predict(X_train_scaled), model.predict(X_train_scaled) - y_train_scaled, c=\"blue\", label=\"Training Data\")\n plt.scatter(model.predict(X_test_scaled), model.predict(X_test_scaled) - y_test_scaled, c=\"orange\", label=\"Testing Data\")\n #plt.legend()\n plt.hlines(y=0, xmin=y_test_scaled.min(), xmax=y_test_scaled.max())\n plt.title(\"Residual Plot\")\n #plt.show()\n pwd = os.getcwd()\n print(pwd)\n #p = Path(os.getcwd()+\"\\static\\images\")\n plt.savefig(\"static/images/\"+st+\".png\")\n f = open(\"static/images/\"+st+\".png\")\n plt.close()\n f.close()\n\n #Lasso model\n ### BEGIN SOLUTION\n lasso = Lasso(alpha=.01).fit(X_train_scaled, y_train_scaled)\n\n lasso_predictions = lasso.predict(X_test_scaled)\n\n lasso_MSE = mean_squared_error(y_test_scaled, lasso_predictions)\n lasso_r2 = lasso.score(X_test_scaled, y_test_scaled)\n ### END SOLUTION\n\n print(f\"Lasso MSE: {lasso_MSE}, R2: {lasso_r2}\")\n\n #Ridge model\n ridgeVal = Ridge(alpha=.01).fit(X_train_scaled, y_train_scaled)\n\n ridge_predictions = ridgeVal.predict(X_test_scaled)\n\n ridge_MSE = mean_squared_error(y_test_scaled, ridge_predictions)\n ridge_r2 = ridgeVal.score(X_test_scaled, y_test_scaled)\n print(f\"ridge MSE: {ridge_MSE}, R2: {ridge_r2}\")\n\n #elasticNet\n elasticnet = ElasticNet(alpha=.01).fit(X_train_scaled, y_train_scaled)\n\n elasticnet_predictions = elasticnet.predict(X_test_scaled)\n\n elasticnet_MSE = mean_squared_error(y_test_scaled, elasticnet_predictions)\n elasticnet_r2 = elasticnet.score(X_test_scaled, y_test_scaled)\n print(f\"elasticnet MSE: {elasticnet_MSE}, R2: {elasticnet_r2}\")\n\n fig1 = plt.figure(figsize=(12, 6))\n axes1 = fig1.add_subplot(1, 2, 1)\n axes2 = fig1.add_subplot(1, 2, 2)\n\n axes1.set_title(\"Original Data\")\n axes2.set_title(\"Scaled Data\")\n\n maxx = X_train[\"High\"].max()\n maxy = y_train.max()\n axes1.set_xlim(-maxx + 1, maxx + 1)\n axes1.set_ylim(-maxy + 1, maxy + 1)\n\n axes2.set_xlim(-2, 2)\n axes2.set_ylim(-2, 2)\n set_axes(axes1)\n set_axes(axes2)\n\n axes1.scatter(X_train[\"High\"], y_train)\n axes2.scatter(X_train_scaled[:,0], y_train_scaled[:])\n \n p = Path(os.getcwd()+\"/static/images\")\n #q = p / \"axes2\"+st+\".png\"\n #if (q.exists()):\n fig1.savefig(\"static/images/axes2\"+st+\".png\")\n f = open(\"static/images/axes2\"+st+\".png\")\n plt.close()\n f.close()\n #else:\n # fig1.savefig(\"static/images/axes2\"+st+\".png\")\n # plt.close()\n\n\n\n \n return render_template(\"indexStocks.html\", stocks=stocks, responsedata=responsedata, init_page=\"initpage\", sel_stk=st, \n minmax_predict=minmax_predict,\n scallar_MSE=scallar_MSE, scallar_r2=scallar_r2,\n lasso_MSE=lasso_MSE, lasso_r2=lasso_r2,\n ridge_MSE=ridge_MSE, ridge_r2=ridge_r2,\n elasticnet_MSE=elasticnet_MSE, elasticnet_r2=elasticnet_r2)\n \n\n#\n#@app.route(\"/api/v1.0/<startdt>/<enddt>\")\n#def startEndDate(startdt, enddt):\n\n# Route to render index.html template using data from Mongo\[email protected](\"/upload/<st>\")\ndef upload_get_stocks(st):\n\n\n # Find one record of data from the mongo database\n # @TODO: YOUR CODE HERE!\n\n #cr = csv.reader(open(\"https://query1.finance.yahoo.com/v7/finance/download/\"+st+\"?period1=1454112000&period2=1611964800&interval=1d&events=history&includeAdjustedClose=true\",\"rb\"))\n \n\n #data = pd.read_csv('https://example.com/passkey=wedsmdjsjmdd')\n\n #df = pd.read_csv(\"static/data/\"+st+\".csv\")\n\n #with open(\"static/data/\"+st+\".csv\", \"wt\") as fp:\n # writer = csv.writer(fp)\n # # writer.writerow([\"your\", \"header\", \"foo\"]) # write header\n # writer.writerows(data)\n\n\n\n #dateval = datetime.date.strtime(\"%D\")\n #print(dateval)\n session = Session(engine)\n stock = session.execute(\"select * from stocks where symbol='\"+st+\"'\")\n #return render_template(\"index.html\", listings=listings)\n # Return template and data\n \n\n if (stock.rowcount == 0):\n data = pd.read_csv(\"https://query1.finance.yahoo.com/v7/finance/download/\"+st+\"?period1=1454112000&period2=1611964800&interval=1d&events=history&includeAdjustedClose=true\", sep=',')\n\n data.to_csv(\"static/data/\"+st+\".csv\", index=False, header=True)\n \n print(data)\n session.execute(\"INSERT INTO stocks VALUES ('\"+st+\"', '\"+st+\" Corp')\")\n session.execute(\"commit\")\n\n\n\n stocks = session.execute(\"select * from stocks\")\n\n\n resdata = [{\n \n }\n ]\n\n responsedata = { 'respdata': resdata\n }\n session.close()\n\n print('Hello this is test')\n data = pd.read_csv(\"static/data/\"+st+\".csv\")\n df = data\n # Drop the null columns where all values are null\n df = df.dropna(axis='columns', how='all')\n # Drop the null rows\n # This is for the MinMax Linear Regression model\n print(df.head())\n df = df.dropna()\n print(df.head())\n y = df[\"Open\"].values.reshape(-1, 1)\n diff = df['Close']-df[\"Open\"]\n diff_locations = []\n for i in diff:\n if (i <0):\n diff_locations.append(0)\n else:\n diff_locations.append(1)\n df['diff'] = pd.DataFrame(diff_locations)\n #X = df[['High', 'Low', 'Close', 'Volume','diff']]\n X = df[['High', 'Low', 'Close', 'Volume','diff']]\n print(X)\n print(y)\n print(X.shape, y.shape)\n X_train, X_test, y_train, y_test = train_test_split(X, y, random_state=42)\n X_minmax = MinMaxScaler().fit(X_train)\n y_minmax = MinMaxScaler().fit(y_train)\n\n X_train_minmax = X_minmax.transform(X_train)\n X_test_minmax = X_minmax.transform(X_test)\n y_train_minmax = y_minmax.transform(y_train)\n y_test_minmax = y_minmax.transform(y_test)\n model2 = LinearRegression()\n model2.fit(X_train_minmax, y_train_minmax)\n print(f\"Testing Data Score: {model2.score(X_test_minmax, y_test_minmax)}\")\n minmax_predict=model2.score(X_test_minmax, y_test_minmax)\n print(minmax_predict)\n\n #This is standard scalar transformation\n X_scaler = StandardScaler().fit(X_train)\n y_scaler = StandardScaler().fit(y_train)\n X_train_scaled = X_scaler.transform(X_train)\n X_test_scaled = X_scaler.transform(X_test)\n y_train_scaled = y_scaler.transform(y_train)\n y_test_scaled = y_scaler.transform(y_test)\n model = LinearRegression()\n model.fit(X_train_scaled, y_train_scaled)\n predictions = model.predict(X_test_scaled)\n scallar_MSE = mean_squared_error(y_test_scaled, predictions)\n scallar_r2 = model.score(X_test_scaled, y_test_scaled)\n plt.scatter(model.predict(X_train_scaled), model.predict(X_train_scaled) - y_train_scaled, c=\"blue\", label=\"Training Data\")\n plt.scatter(model.predict(X_test_scaled), model.predict(X_test_scaled) - y_test_scaled, c=\"orange\", label=\"Testing Data\")\n #plt.legend()\n plt.hlines(y=0, xmin=y_test_scaled.min(), xmax=y_test_scaled.max())\n plt.title(\"Residual Plot\")\n #plt.show()\n pwd = os.getcwd()\n print(pwd)\n #p = Path(os.getcwd()+\"\\static\\images\")\n plt.savefig(\"static/images/\"+st+\".png\")\n f = open(\"static/images/\"+st+\".png\")\n plt.close()\n f.close()\n\n #Lasso model\n ### BEGIN SOLUTION\n lasso = Lasso(alpha=.01).fit(X_train_scaled, y_train_scaled)\n\n lasso_predictions = lasso.predict(X_test_scaled)\n\n lasso_MSE = mean_squared_error(y_test_scaled, lasso_predictions)\n lasso_r2 = lasso.score(X_test_scaled, y_test_scaled)\n ### END SOLUTION\n\n print(f\"Lasso MSE: {lasso_MSE}, R2: {lasso_r2}\")\n\n #Ridge model\n ridgeVal = Ridge(alpha=.01).fit(X_train_scaled, y_train_scaled)\n\n ridge_predictions = ridgeVal.predict(X_test_scaled)\n\n ridge_MSE = mean_squared_error(y_test_scaled, ridge_predictions)\n ridge_r2 = ridgeVal.score(X_test_scaled, y_test_scaled)\n print(f\"ridge MSE: {ridge_MSE}, R2: {ridge_r2}\")\n\n #elasticNet\n elasticnet = ElasticNet(alpha=.01).fit(X_train_scaled, y_train_scaled)\n\n elasticnet_predictions = elasticnet.predict(X_test_scaled)\n\n elasticnet_MSE = mean_squared_error(y_test_scaled, elasticnet_predictions)\n elasticnet_r2 = elasticnet.score(X_test_scaled, y_test_scaled)\n print(f\"elasticnet MSE: {elasticnet_MSE}, R2: {elasticnet_r2}\")\n\n fig1 = plt.figure(figsize=(12, 6))\n axes1 = fig1.add_subplot(1, 2, 1)\n axes2 = fig1.add_subplot(1, 2, 2)\n\n axes1.set_title(\"Original Data\")\n axes2.set_title(\"Scaled Data\")\n\n maxx = X_train[\"High\"].max()\n maxy = y_train.max()\n axes1.set_xlim(-maxx + 1, maxx + 1)\n axes1.set_ylim(-maxy + 1, maxy + 1)\n\n axes2.set_xlim(-2, 2)\n axes2.set_ylim(-2, 2)\n set_axes(axes1)\n set_axes(axes2)\n\n axes1.scatter(X_train[\"High\"], y_train)\n axes2.scatter(X_train_scaled[:,0], y_train_scaled[:])\n \n p = Path(os.getcwd()+\"/static/images\")\n #q = p / \"axes2\"+st+\".png\"\n #if (q.exists()):\n fig1.savefig(\"static/images/axes2\"+st+\".png\")\n f = open(\"static/images/axes2\"+st+\".png\")\n plt.close()\n f.close()\n #else:\n # fig1.savefig(\"static/images/axes2\"+st+\".png\")\n # plt.close()\n\n\n\n \n return render_template(\"indexStocks.html\", stocks=stocks, responsedata=responsedata, init_page=\"initpage\", sel_stk=st, \n minmax_predict=minmax_predict,\n scallar_MSE=scallar_MSE, scallar_r2=scallar_r2,\n lasso_MSE=lasso_MSE, lasso_r2=lasso_r2,\n ridge_MSE=ridge_MSE, ridge_r2=ridge_r2,\n elasticnet_MSE=elasticnet_MSE, elasticnet_r2=elasticnet_r2)\n \n\n# Route to render index.html template using data from Mongo\[email protected](\"/kclassification/<st>\")\ndef kclassification(st):\n\n # Find one record of data from the mongo database\n # @TODO: YOUR CODE HERE!\n session = Session(engine)\n stocks = session.execute(\"select * from stocks \")\n #return render_template(\"index.html\", listings=listings)\n # Return template and data\n \n resdata = [{\n \n }\n ]\n\n responsedata = { 'respdata': resdata\n }\n session.close()\n\n print('Hello this is test')\n df = pd.read_csv(\"static/data/\"+st+\".csv\")\n # Drop the null columns where all values are null\n df = df.dropna(axis='columns', how='all')\n # Drop the null rows\n # This is for the MinMax Linear Regression model\n print(df.head())\n df = df.dropna()\n print(df.head())\n \n diff = df['Close']-df[\"Open\"]\n diff_locations = []\n for i in diff:\n if (i <0):\n diff_locations.append(0)\n else:\n diff_locations.append(1)\n df['diff'] = pd.DataFrame(diff_locations)\n #X = df[['High', 'Low', 'Close', 'Volume','diff']]\n X = df[['Open','High', 'Low', 'Close', 'Volume']]\n y = df[\"diff\"]\n print(X)\n print(y)\n print(st)\n print(X.shape, y.shape)\n #target_names = [\"negative\", \"positive\"]\n X_train, X_test, y_train, y_test = train_test_split(X, y, random_state=42)\n X_scaler = StandardScaler().fit(X_train)\n X_train_scaled = X_scaler.transform(X_train)\n X_test_scaled = X_scaler.transform(X_test)\n train_scores = []\n test_scores = []\n\n for k in range(1, 20, 2):\n knn = KNeighborsClassifier(n_neighbors=k)\n knn.fit(X_train_scaled, y_train)\n train_score = knn.score(X_train_scaled, y_train)\n test_score = knn.score(X_test_scaled, y_test)\n train_scores.append(train_score)\n test_scores.append(test_score)\n print(f\"k: {k}, Train/Test Score: {train_score:.3f}/{test_score:.3f}\")\n \n \n plt.plot(range(1, 20, 2), train_scores, marker='o')\n plt.plot(range(1, 20, 2), test_scores, marker=\"x\")\n plt.xlabel(\"k neighbors\")\n plt.ylabel(\"Testing accuracy Score\")\n\n\n p = Path(os.getcwd()+\"/static/images\")\n #q = p / \"kclass\"+st+\".png\"\n #if (q.exists()):\n plt.savefig(\"static/images/kclass\"+st+\".png\")\n plt.close()\n f = open(\"static/images/kclass\"+st+\".png\")\n f.close()\n #else:\n # plt.savefig(\"static/images/kclass\"+st+\".png\")\n # plt.close()\n \n knn = KNeighborsClassifier(n_neighbors=13)\n knn.fit(X_train_scaled, y_train)\n print('k=13 Test Acc: %.3f' % knn.score(X_test_scaled, y_test))\n knn_score = knn.score(X_test_scaled, y_test)\n\n\n\n return render_template(\"kclassStocks.html\", stocks=stocks, responsedata=responsedata, sel_stk_kc=st, \n init_page=\"initpage\",knn_score=knn_score)\n\ndef set_axes(ax):\n ax.spines['left'].set_position('center')\n ax.spines['right'].set_color('none')\n ax.spines['bottom'].set_position('center')\n ax.spines['top'].set_color('none')\n ax.xaxis.set_ticks_position('bottom')\n ax.yaxis.set_ticks_position('left')\n\nif __name__ == \"__main__\":\n app.run(debug=True)\n"
}
] | 2 |
HuRuWo/Django
|
https://github.com/HuRuWo/Django
|
250821b093f0b26ea2db9207d96db37b396791d9
|
767428abc92e52a5cae5a625959eba1da0ef2f25
|
cf8e67ce6ef46dc39c6cbbe20ec451dbbc7e225e
|
refs/heads/master
| 2017-12-01T21:27:58.421711 | 2017-04-27T08:24:09 | 2017-04-27T08:24:09 | 62,480,949 | 1 | 1 | null | null | null | null | null |
[
{
"alpha_fraction": 0.5747028589248657,
"alphanum_fraction": 0.5806451439857483,
"avg_line_length": 22.540000915527344,
"blob_id": "8dacf72d232a2615cf2c4e0146a09f7b5962f1c6",
"content_id": "7db16e5fe0d5beb879b808e58096cfaaa6673abb",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 1412,
"license_type": "no_license",
"max_line_length": 74,
"num_lines": 50,
"path": "/myblog/blog/forms.py",
"repo_name": "HuRuWo/Django",
"src_encoding": "UTF-8",
"text": "#coding:utf-8\nfrom django import forms\n\n\nclass CommentForm(forms.Form):\n \"\"\"\n 评论表单\n \"\"\"\n\n name = forms.CharField(label='您的称呼', max_length=16, error_messages={\n 'required': '请填写您的称呼',\n 'max_length': '称呼太长'\n })\n\n email = forms.EmailField(label='您的邮箱', error_messages={\n 'required': '请填写您的邮箱',\n 'invalid': '邮箱格式不正确'\n })\n\n content = forms.CharField(label='评论内容', error_messages={\n 'required': '请填写您的评论内容',\n 'max_length': '评论内容太长'\n })\n\n\nclass RegisterForm(forms.Form):\n \"\"\"\n 注册表单\n \"\"\"\n\n username = forms.CharField(label='帐号', max_length=16, error_messages={\n 'required': '请填写您的称呼',\n 'max_length': '称呼太长'\n })\n email = forms.EmailField(label='邮箱', error_messages={\n 'required': '请填写您的邮箱',\n 'invalid': '邮箱格式不正确'\n })\n\n phone = forms.CharField(label='手机', error_messages={\n 'required': '请填写您的电话号码',\n })\n\n password1 = forms.CharField(label='密码', error_messages={\n 'required': '请填写您的密码',\n },widget=forms.PasswordInput)\n\n password2 = forms.CharField(label='密码', error_messages={\n 'required': '重复您的密码',\n },widget=forms.PasswordInput)\n\n"
},
{
"alpha_fraction": 0.6072644591331482,
"alphanum_fraction": 0.7298524379730225,
"avg_line_length": 25.696969985961914,
"blob_id": "c6c8343bc8205581cd033850f1c81b8bd6932205",
"content_id": "03defd22d2a9d80bc89b9a67950838f9df06fd10",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Markdown",
"length_bytes": 1139,
"license_type": "no_license",
"max_line_length": 93,
"num_lines": 33,
"path": "/README.md",
"repo_name": "HuRuWo/Django",
"src_encoding": "UTF-8",
"text": "# Django博客系统\n=========\n##使用pythonweb框架django写的网站<br>\ndjango版本1.9.6<br>\n数据库:sqlite3<br>\n前端:bootsharp<br>\n具体文章访问个人博客:www.wanjiaxing.top<br>\n\n\n版本1.0<br>\n实现基本的博客功能。<br>\n1文章的增加和删除。<br>\n2文章列表和详情。 <br>\n3评论功能以及后台管理。<br>\n4分页功能。<br>\n\n##如需正常使用,需安装以下模块\ndjango-pagination>=1.07\n\n##后台密码\n帐号 wanwan 密码 wanwan123456\n\n###使用方法: cmd>>移动到blog目录 >>输入 python manage.py runserver<br>\n\n\n###演示页面<br>\n首页:文章列表<br>\n\n###文章详情页:<br>\n\n###后台管理:<br>\n\n\n"
},
{
"alpha_fraction": 0.5643822550773621,
"alphanum_fraction": 0.5691249966621399,
"avg_line_length": 31.84375,
"blob_id": "f4e9ae4164b215f67253d4cd670fce03a3b1313a",
"content_id": "421956e47db70a469b6752cb93d92e8cb331c011",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 4362,
"license_type": "no_license",
"max_line_length": 156,
"num_lines": 128,
"path": "/myblog/blog/views.py",
"repo_name": "HuRuWo/Django",
"src_encoding": "UTF-8",
"text": "# coding:utf-8\nfrom django.http import HttpResponse,HttpResponseRedirect,Http404\nfrom django.shortcuts import render,render_to_response\nfrom .forms import CommentForm,RegisterForm\nfrom django.template import loader, RequestContext\nimport time\nfrom django.contrib import auth\nfrom django.contrib.auth.models import User\nfrom .models import Blog,UserProfile,Comment\n# Create your views here.\n\n\ndef fenye_blog(request,yeshu):\n a=int(yeshu)\n ctx={\n 'str':yeshu,\n 'blogs': Blog.objects.all()[(a-1)*5:a*5]\n }\n return render(request, 'fenye.html', ctx,)\n\ndef get_blogs(request):\n ctx = {\n 'blogs': Blog.objects.all().order_by('-created')[0:5]\n }\n return render(request, 'index.html', ctx)\n\n\ndef get_detail(request, blog_id):\n try:\n blog = Blog.objects.get(id=blog_id)\n except Blog.DoesNotExist:\n raise Http404\n\n if request.method == 'GET':\n form = CommentForm()\n else:\n form = CommentForm(request.POST)\n if form.is_valid():\n cleaned_data = form.cleaned_data\n cleaned_data['blog'] = blog\n Comment.objects.create(**cleaned_data)\n\n ctx = {\n 'blog': blog,\n 'comments': blog.comment_set.all().order_by('-created'),\n 'form': form\n }\n return render(request, 'detail.html', ctx)\n\ndef base(request):\n return render(request, 'base.html')\n\ndef userRegister(request):\n curtime = time.strftime(\"%Y-%m-%d %H:%M:%S\", time.localtime())\n errors = []\n form = RegisterForm()\n ctx = {\n 'form': form,\n 'errors':errors,\n }\n if request.user.is_authenticated(): # 检查是否已登录\n auth.logout(request)\n return HttpResponse(\"/userregister\") #重定向到用户界面\n try:\n if request.method == 'POST':\n username = request.POST.get('username', '')\n password1 = request.POST.get('password1', '')\n password2 = request.POST.get('password2', '')\n email = request.POST.get('email', '')\n phone = request.POST.get('phone', '')\n\n\n print username,password1,password2,email,phone\n\n registerForm = RegisterForm({'username': username, 'password1': password1, 'password2': password2, 'email': email,'phone':phone}) # b********\n if not registerForm.is_valid():#is_valid 所有验证都通过\n errors.extend(registerForm.errors.value())\n return render(request, \"userregister.html\", ctx)\n user = User() # d************************\n user.username = username\n user.set_password(password1)\n user.email = email\n user.save()\n # 用户扩展信息 profile\n profile = UserProfile() # e*************************\n profile.user_id = user.id\n profile.phone = phone\n profile.save()\n # 登录前需要先验证\n newUser = auth.authenticate(username=username, password=password1) # f***************\n if newUser is not None:\n print '注册成功'\n auth.login(request, newUser) # g*******************\n return HttpResponseRedirect(\"/\")\n else:\n return render(request, \"userregister.html\", ctx)\n except Exception,e:\n errors.append(str(e))\n return render(request,\"userregister.html\", ctx)\n\ndef Login(request):\n errors = []\n ctx = {\n 'errors': errors,\n }\n if request.user.is_authenticated(): # 检查是否已登录\n auth.logout(request)\n errors.append('\"已注销当前用户\"')\n return render(request, \"login.html\", ctx)\n if request.method == 'POST':\n username = request.POST.get('username', '')\n password = request.POST.get('password', '')\n newUser = auth.authenticate(username=username, password=password) # f***************\n if newUser is not None:\n print '登陆成功'\n auth.login(request, newUser) # g*******************\n return HttpResponseRedirect(\"/\")\n else:\n errors.append('请验证你的密码或帐号是否有误')\n return render(request, \"login.html\", ctx)\n else:\n return render(request, 'login.html')\n\n\n\ndef Logout(request):\n auth.logout(request)\n return HttpResponseRedirect('/')\n\n\n\n\n\n\n\n\n\n\n\n\n\n"
},
{
"alpha_fraction": 0.748235285282135,
"alphanum_fraction": 0.748235285282135,
"avg_line_length": 29.428571701049805,
"blob_id": "1e04ec6324816c69308676734fa08379fefc09c7",
"content_id": "db57ebced2ba3178f25d79ec91eed3fccb0face0",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 425,
"license_type": "no_license",
"max_line_length": 48,
"num_lines": 14,
"path": "/myblog/blog/admin.py",
"repo_name": "HuRuWo/Django",
"src_encoding": "UTF-8",
"text": "from django.contrib import admin\n\n# Register your models here.\nfrom .models import Category, Tag, Blog\n\nclass BlogAdmin(admin.ModelAdmin):\n list_display = ('title','author','created',)\nadmin.site.register(Blog,BlogAdmin)\nclass CateAdmin(admin.ModelAdmin):\n list_display = ('name',)\nadmin.site.register(Category,CateAdmin,)\nclass TagAdmin(admin.ModelAdmin):\n list_display = ('name',)\nadmin.site.register(Tag,TagAdmin)"
}
] | 4 |
winstonjoels/churn_prediction_gcp
|
https://github.com/winstonjoels/churn_prediction_gcp
|
46780897afec6c2fb333861b1f96545cc9e0af66
|
2d1bcb2cedc4d9b2388a71dae61e88b018608064
|
a6958673e7115fb8f38e7a048828d9ec782a88fa
|
refs/heads/master
| 2022-12-05T08:59:31.054644 | 2020-08-28T07:34:50 | 2020-08-28T07:34:50 | null | 0 | 0 | null | null | null | null | null |
[
{
"alpha_fraction": 0.6115162968635559,
"alphanum_fraction": 0.6165066957473755,
"avg_line_length": 31.5625,
"blob_id": "08a885409197e8a3d85daa2d59d5b25885f91a0b",
"content_id": "44ce9f098e0fa24b373df000cdc12cf5457d50c6",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 2605,
"license_type": "no_license",
"max_line_length": 354,
"num_lines": 80,
"path": "/app.py",
"repo_name": "winstonjoels/churn_prediction_gcp",
"src_encoding": "UTF-8",
"text": "from flask import Flask,request, url_for, redirect, render_template, jsonify\nimport pandas as pd\nfrom pycaret.regression import *\nimport pickle\nimport numpy as np\nimport random\nimport csv\n\ndef randN():\n N=7\n min = pow(10, N-1)\n max = pow(10, N) - 1\n id = random.randint(min, max)\n return id\n\napp = Flask(__name__)\n\nglobal model, cols, id\n\[email protected]('/')\ndef home():\n return render_template(\"home.html\")\n\[email protected]('/eval')\ndef eval():\n filename = \"data/Details.csv\"\n fields = [] \n rows = [] \n with open(filename, 'r') as csvfile: \n csvreader = csv.reader(csvfile) \n fields = next(csvreader) \n for row in csvreader: \n rows.append(row) \n df = pd.DataFrame(rows, columns = ['ID', 'Name', 'Predicted', 'Actual'])\n return render_template(\"eval.html\", tables=[df], titles=df.columns.values)\n\[email protected]('/model/<name>',methods=['POST'])\ndef model(name):\n global model, cols, id\n id = randN()\n if name==\"infy_bank\":\n cols = ['Age', 'Experience', 'Income', 'ZIP Code', 'Family', 'CCAvg', 'Education', 'Mortgage', 'Securities Account', 'CD Account', 'Online', 'CreditCard']\n else:\n cols = ['cap-shape','cap-surface','cap-color','bruises','odor','gill-attachment','gill-spacing','gill-size','gill-color','stalk-shape','stalk-root','stalk-surface-above-ring','stalk-surface-below-ring','stalk-color-above-ring','stalk-color-below-ring','veil-type','veil-color','ring-number','ring-type','spore-print-color','population','habitat']\n file = name+\".html\"\n return render_template(file, id=id)\n\[email protected]('/predict/<name>',methods=['POST'])\ndef predict(name):\n global model, cols, id\n int_features = [x for x in request.form.values()]\n final = np.array(int_features)\n if name == 'mush':\n name1 = name+\"_training_pipeline\"\n else:\n name1 = name\n model = load_model(name1)\n data_unseen = pd.DataFrame([final], columns = cols)\n prediction = model.predict(data_unseen)\n if name == 'mush':\n if int(prediction)==1:\n pred=\"The mushroom is Poisonous\"\n else:\n pred=\"The mushroom is Edible\"\n else:\n pred='The chance of this person is {}'.format(int(prediction))\n \n actual = '?'\n list = [id, name, int(prediction), actual]\n list2 = ['ID','Name', 'Predicted', 'Actual']\n df = np.array(list)\n df = pd.DataFrame([df], columns=list2)\n \n df.to_csv('data/Details.csv', mode='a', header=False, index=False)\n \n file = name+\".html\"\n return render_template(file,pred='{}'.format(pred))\n\nif __name__ == '__main__':\n app.run()\n"
},
{
"alpha_fraction": 0.47668394446372986,
"alphanum_fraction": 0.7098445892333984,
"avg_line_length": 16.545454025268555,
"blob_id": "91f354c08382f9e1b415f2ee7de550983f19c80a",
"content_id": "7643bdaf6c5adf0aac18f2e6e096a853a14369d5",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Text",
"length_bytes": 193,
"license_type": "no_license",
"max_line_length": 23,
"num_lines": 11,
"path": "/requirements.txt",
"repo_name": "winstonjoels/churn_prediction_gcp",
"src_encoding": "UTF-8",
"text": "pycaret==1.0.0\nFlask==1.1.1\nJinja2==2.10.1\ngunicorn==19.9.0\ncertifi==2019.11.28\nitsdangerous==1.1.0\ncloudstorage==0.10.1\nwebapp2==2.5.2\nrequests==2.24.0\nPyMySQL==0.10.0\nFlask-SQLAlchemy==2.4.4\n"
}
] | 2 |
Abdullah-AlSawalmeh/django-crud
|
https://github.com/Abdullah-AlSawalmeh/django-crud
|
e95f3efd50e1a76269ec03ae72e67a2c260e3cd8
|
7290bd9c178a3624d551906f59f0ba47fff40e76
|
f74092c83fee561e916aeb05d8a2601ccf58b25a
|
refs/heads/main
| 2023-06-18T05:41:34.861029 | 2021-07-13T21:59:22 | 2021-07-13T21:59:22 | 385,744,978 | 0 | 0 | null | null | null | null | null |
[
{
"alpha_fraction": 0.5856507420539856,
"alphanum_fraction": 0.5951056480407715,
"avg_line_length": 30.40350914001465,
"blob_id": "85b89f49874ce3fe4e993fd54452f1465043f63c",
"content_id": "022e49e2673c0e2106cf03c8b647bc271be70519",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 1798,
"license_type": "no_license",
"max_line_length": 72,
"num_lines": 57,
"path": "/snacks_crud_project/snacks/tests.py",
"repo_name": "Abdullah-AlSawalmeh/django-crud",
"src_encoding": "UTF-8",
"text": " \nfrom django.test import TestCase \nfrom django.urls import reverse\nfrom django.contrib.auth import get_user_model\nfrom .models import Snack\n\nclass SnacksCRUDTests(TestCase):\n \n def setUp(self):\n self.user = get_user_model().objects.create_user(\n username = 'abdullah',\n password = '123456'\n )\n self.snack = Snack.objects.create(\n title = 'mansaf',\n description = 'delecious',\n purchaser = self.user\n )\n\n\n def test_snack_list_view(self):\n url = reverse('snack_list')\n actual_status_code = self.client.get(url).status_code\n self.assertEqual(actual_status_code, 200)\n\n def test_snack_details_view(self):\n response = self.client.get(reverse('snack_detail', args='1'))\n self.assertEqual(response.status_code, 200)\n\n def test_snack_update_view(self):\n response = self.client.post(reverse('snack_update', args='1'), {\n 'title':'mansaf' ,\n })\n self.assertContains(response, 'mansaf')\n \n \n def test_snack_create_view(self):\n response = self.client.post(reverse(\"snack_create\"),\n {\n \"title\": \"maqloba\",\n \"description\": \"nice\",\n \"purchaser\": self.user\n })\n\n \n self.assertEqual(response.status_code, 200)\n self.assertContains(response, 'maqloba')\n self.assertContains(response, 'nice')\n self.assertContains(response, 'abdullah')\n\n\n def test_snack_delete_view(self):\n response = self.client.get(reverse(\"snack_delete\", args=\"1\"))\n self.assertEqual(response.status_code, 200)\n\n def test_queris(self):\n self.assertNumQueries(2) \n self.assertEqual(Snack.objects.get(pk=1).title,'mansaf') \n "
},
{
"alpha_fraction": 0.7231638431549072,
"alphanum_fraction": 0.7344632744789124,
"avg_line_length": 25.923076629638672,
"blob_id": "47521561d34d3348bd5dcaf2263f6ac6b31a8561",
"content_id": "d3e537a26491bd80c5275a3c073e37574f7d05eb",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 354,
"license_type": "no_license",
"max_line_length": 78,
"num_lines": 13,
"path": "/snacks_crud_project/snacks/models.py",
"repo_name": "Abdullah-AlSawalmeh/django-crud",
"src_encoding": "UTF-8",
"text": "from django.db import models\nfrom django.urls import reverse\n\nclass Snack(models.Model):\n title=models.CharField(max_length=100)\n purchaser=models.ForeignKey('auth.User',on_delete=models.CASCADE ,default=1)\n description=models.TextField()\n\n def __str__(self):\n return self.title\n\n def get_absolute_url(self):\n return reverse('snack_list') \n\n\n"
}
] | 2 |
sahabi/collision-avoidance
|
https://github.com/sahabi/collision-avoidance
|
7581fefa34f68428a1f6048a818543bdc1fe8cc4
|
3c7b41d67397baa52f74417fd4b3aacdd4d85ebb
|
035b6286d8d1850e8533ff85815288bab5bf82c2
|
refs/heads/master
| 2021-09-03T12:02:56.340560 | 2018-01-08T22:42:52 | 2018-01-08T22:42:52 | 103,327,825 | 0 | 0 | null | null | null | null | null |
[
{
"alpha_fraction": 0.5026033520698547,
"alphanum_fraction": 0.545176088809967,
"avg_line_length": 35.58263397216797,
"blob_id": "f536b59b8a4e43d021b88db87292f9b704ad8f2f",
"content_id": "c5ed6428a17058741db094d887cafd6facbfbcaf",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 13060,
"license_type": "no_license",
"max_line_length": 107,
"num_lines": 357,
"path": "/pygame_env.py",
"repo_name": "sahabi/collision-avoidance",
"src_encoding": "UTF-8",
"text": "# -*- coding: utf-8 -*-\nimport pygame\nfrom pygame.locals import *\nfrom time import sleep\nfrom Controller import Controller as Ca_4\nfrom pygame import Rect\n\nctrl = Ca_4()\n\nBLACK = (0, 0, 0)\nWHITE = (255, 255, 255)\nPURPLE = (120, 78, 240)\nRED = (255, 0, 0)\nBLUE = (0, 0, 255)\nGREEN = (0, 200, 0)\n\nPOS_LIST = []\nN_LAYERS = 2\nrect = Rect((0,0,0,0))\n\ndef return_input(uav1_pos, uav2_pos, uav3_pos, uav1_2_collide, uav1_3_collide,\n uav2_3_collide, uav1_layer, uav2_layer, uav3_layer):\n return {\n \"uav1_pos\": Ca_4.Pos.NONE if uav1_pos == 0 else Ca_4.Pos.A,\n \"uav2_pos\": Ca_4.Pos.NONE if uav2_pos == 0 else Ca_4.Pos.A,\n \"uav3_pos\": Ca_4.Pos.NONE if uav3_pos == 0 else Ca_4.Pos.A,\n \"uav1_2_collide\": False,# uav1_2_collide,\n \"uav1_3_collide\": False,#uav1_3_collide,\n \"uav2_3_collide\": False,#uav2_3_collide,\n \"uav1_layer\": Ca_4.Layer.FIRST if uav1_layer == 0 else\n Ca_4.Layer.SECOND,\n \"uav2_layer\": Ca_4.Layer.FIRST if uav2_layer == 0 else\n Ca_4.Layer.SECOND,\n \"uav3_layer\": Ca_4.Layer.FIRST if uav3_layer == 0 else\n Ca_4.Layer.SECOND,\n }\n\ndef is_collide(uav1, uav2):\n return False if uav1.region_rect_1.collidelist([uav2.region_rect_1]) == -1\\\n and uav1.region_rect_2.collidelist([uav2.region_rect_2]) == -1\\\n else True\n\ndef is_in_or(op_reg, point):\n return op_reg.collidepoint(point)\n\ndef is_in(loc1, loc2):\n if abs(loc1[0] - loc2[0]) < 20 and abs(loc1[1] - loc2[1]) < 20:\n return True\n else:\n return False\n\ndef make_rect(initial, destination):\n if initial[0] < destination[0]:\n x = initial[0] - 10\n else:\n x = destination[0] - 10\n\n if initial[1] < destination[1]:\n y = initial[1] - 10\n else:\n y = destination[1] - 10\n w = abs(initial[0] - destination[0]) + 60\n l = abs(initial[1] - destination[1]) + 60\n return pygame.Rect(x, y, w, l)\n\ndef get_commands(uav, point):\n diff_x = (uav.position[0]%450) - point[0]\n diff_y = uav.position[1] - point[1]\n hor_cmd = ['east']*abs(diff_x/45) if diff_x <= 0 else ['west']*(diff_x/45)\n ver_cmd = ['south']*abs(diff_y/45) if diff_y <= 0 else ['north']*(diff_y/45)\n return hor_cmd + ver_cmd\n\ndef get_dir(loc_1, loc_2):\n if loc_1[0] > loc_2[0]:\n return 'west'\n elif loc_1[1] > loc_2[1]:\n return 'north'\n elif loc_1[1] < loc_2[1]:\n return 'south'\n elif loc_1[0] < loc_2[0]:\n return 'east'\n\nclass Circle(pygame.sprite.Sprite):\n def __init__(self, display, color, init_rect):\n pygame.sprite.Sprite.__init__(self)\n self.image = display\n self.rect = self.image.get_rect()\n\n def update(self,rect):\n self.image = pygame.Surface((rect[2], rect[3]))\n self.image.fill((255,255,255))\n self.rect = self.image.get_rect()\n self.rect.center = (rect[0], rect[1])\n pygame.draw.rect(self.image, BLACK, rect, 5)\n\nclass RegionSprite(pygame.sprite.Sprite):\n\n def __init__(self, display, position, rect):\n pygame.sprite.Sprite.__init__(self)\n self.position = position\n self.image = pygame.Surface((500,500))\n self.color = BLACK\n self.surface = display\n self.rect = self.surface.get_rect()\n pygame.draw.rect(self.surface, self.color, self.rect)\n\n def update(self, rect):\n self.rect.center = self.position\n\nclass PointSprite(pygame.sprite.Sprite):\n\n def __init__(self, position, label):\n pygame.sprite.Sprite.__init__(self)\n self.src_image = pygame.image.load(\"assets/point.png\")\n self.src_image = pygame.transform.scale(self.src_image,(25,25))\n self.position = position\n self.chosen = False\n self.label = label\n\n def update(self):\n if self.chosen:\n color_surface(self, RED)\n self.image = pygame.transform.rotate(self.src_image, 0)\n else:\n color_surface(self, BLACK)\n self.image = pygame.transform.rotate(self.src_image, 0)\n self.rect = self.image.get_rect()\n self.rect.center = (self.position[0]*450 + self.position[1],\n self.position[2])\n\n def select(self):\n self.chosen = True\n\n def unselect(self):\n self.chosen = False\n\nclass UAVSprite(pygame.sprite.Sprite):\n MAX_SPEED = 45\n TURN_SPEED = 5\n\n def __init__(self, position, color, display):\n pygame.sprite.Sprite.__init__(self)\n self.src_image = pygame.image.load(\"assets/uav.png\")\n self.position = position\n self.next_state = (position[0]+(position[1]/450), (position[1]%450)/45,\n position[2]/45)\n self.speed = self.direction = 0\n self.k_left = self.k_right = self.k_down = self.k_up = 0\n self.discrete = self.next_state\n arr = pygame.surfarray.pixels3d(self.src_image)\n arr[:,:,0] = color[0]\n arr[:,:,1] = color[1]\n arr[:,:,2] = color[2]\n self.color = color\n self.display = display\n self.region_rect_1 = (0,0,0,0)\n self.region_rect_2 = (0,0,0,0)\n\n def move(self, action):\n l, x, y = self.next_state\n if action == 'east' and self.next_state[1] < 9:\n x += 1\n elif action == 'west' and self.next_state[1] > 1:\n x -= 1\n elif action == 'south' and self.next_state[2] < 15:\n y += 1\n elif action == 'north' and self.next_state[2] > 1:\n y -= 1\n elif action == 'ascend' and self.next_state[0] < N_LAYERS-1:\n l += 1\n x += 0\n y += 0\n elif action == 'descend' and self.next_state[0] > 0:\n l -= 1\n x += 0\n y += 0\n self.next_state = (l, x, y)\n\n def update(self):\n x = self.next_state[1] * 45 + (450 * self.next_state[0])\n y = self.next_state[2] * 45\n self.position = (x, y)\n self.image = pygame.transform.rotate(self.src_image, 0)\n self.rect = self.image.get_rect()\n self.rect.center = self.position\n self.discrete = self.next_state\n pygame.draw.rect(self.display, self.color, self.region_rect_1, 2)\n pygame.draw.rect(self.display, self.color, self.region_rect_2, 2)\n\n def update_region(self, position, color = None):\n if True:\n initial = self.position\n rect_1 = make_rect((initial[0]%450,initial[1]),[position[1]%450,\n position[2]])\n rect_2 = make_rect(((initial[0]%450) + 450, initial[1]),[450 + (position[1]%450), position[2]])\n if color is not None:\n pygame.draw.rect(self.display, color, rect_1, 2)\n pygame.draw.rect(self.display, color, rect_2, 2)\n else:\n pygame.draw.rect(self.display, self.color, rect_1, 2)\n pygame.draw.rect(self.display, self.color, rect_2, 2)\n self.region_rect_1 = rect_1\n self.region_rect_2 = rect_2\n pygame.draw.rect(self.display, self.color, self.region_rect_1, 2)\n pygame.draw.rect(self.display, self.color, self.region_rect_2, 2)\n def get_layer(self):\n return self.discrete[0]\n\n def get_pos(self):\n for pos in POS_LIST:\n if is_in(self.rect.center, pos.rect.center):\n return pos.label\n return 0\n\ndef color_surface(surface, color):\n arr = pygame.surfarray.pixels3d(surface.src_image)\n arr[:,:,0] = color[0]\n arr[:,:,1] = color[1]\n arr[:,:,2] = color[2]\n\nclass App:\n def __init__(self):\n self._running = True\n self.display = None\n self.size = self.width, self.height = 900, 700\n self.layers = 2\n\n def on_init(self):\n pygame.init()\n pygame.font.init()\n myfont = pygame.font.SysFont('Comic Sans MS', 22)\n self.loc_a = myfont.render('Location A', False, (0, 0, 0))\n self.layer_1 = myfont.render('Layer 1', False, (0, 0, 0))\n self.layer_2 = myfont.render('Layer 2', False, (0, 0, 0))\n self.display = pygame.display.set_mode(self.size,\n pygame.HWSURFACE |\n pygame.DOUBLEBUF)\n self.background = pygame.Surface(self.display.get_size())\n self.background = self.background.convert()\n self.background.fill((255, 255, 255))\n self._running = True\n self.display.fill((255, 255, 255))\n pygame.draw.line(self.display, (100, 100, 100),\n [int(self.width/self.layers), 0],\n [int(self.width/self.layers), self.height], 5)\n rect = self.display.get_rect()\n self.uav_1 = UAVSprite((0 , 45, 45), BLACK, self.display)\n self.uav_2 = UAVSprite((0, 45,300), PURPLE,\n self.display)\n self.uav_3 = UAVSprite((1, 350, 145), GREEN, self.display)\n self.UAV_LIST = []\n self.UAV_LIST.append(self.uav_1)\n self.UAV_LIST.append(self.uav_2)\n self.UAV_LIST.append(self.uav_3)\n self.point_1 = PointSprite((0 , 300, 300), 1)\n\n POS_LIST.append(self.point_1)\n self.operating_region_1 = Circle(self.display, BLACK, (100,100,200,200))\n self.region_group = pygame.sprite.RenderPlain((self.operating_region_1))\n self.uav_group = pygame.sprite.RenderPlain((self.uav_1, self.uav_2,\n self.uav_3))\n self.point_group = pygame.sprite.RenderPlain(POS_LIST)\n self.layers = 2\n self.uav_group.update()\n self.uav_group.clear(self.display,self.background)\n self.uav_group.draw(self.display)\n\n self.point_group.update()\n self.point_group.clear(self.display,self.background)\n self.point_group.draw(self.display)\n\n self.uav_1.update_region((0, 150, 150))\n self.uav_2.update_region((0, 300, 300))\n self.uav_3.update_region((1, 400, 200))\n\n pygame.display.flip()\n\n def on_event(self, event):\n if event.type == pygame.QUIT:\n self._running = False\n\n def on_loop(self):\n pass\n\n def on_cleanup(self):\n pygame.quit()\n\n def reset_map(self):\n self.display.fill(pygame.Color(\"white\"))\n pygame.draw.line(self.display, (100, 100, 100),\n [int(self.width/self.layers), 0],\n [int(self.width/self.layers), self.height], 5)\n self.display.blit(self.loc_a, (700, 320))\n self.display.blit(self.layer_1, (350,635))\n self.display.blit(self.layer_2, (800, 635))\n\n def show(self):\n self.reset_map()\n self.uav_group.update()\n self.uav_group.clear(self.display,self.background)\n self.uav_group.draw(self.display)\n\n self.point_group.update()\n self.point_group.clear(self.display,self.background)\n self.point_group.draw(self.display)\n pygame.display.flip()\n sleep(0.5)\n\n def on_execute(self):\n if self.on_init() == False:\n self._running = False\n\n while( self._running ):\n for event in pygame.event.get():\n self.on_event(event)\n\n self.show()\n\n # creating inputs to controller\n uav1_pos = self.uav_1.get_pos()\n uav2_pos = self.uav_2.get_pos()\n uav3_pos = self.uav_3.get_pos()\n uav1_2_collide = is_collide(self.uav_1, self.uav_2)\n uav1_3_collide = is_collide(self.uav_1, self.uav_3)\n uav2_3_collide = is_collide(self.uav_2, self.uav_3)\n uav1_layer = self.uav_1.get_layer()\n uav2_layer = self.uav_2.get_layer()\n uav3_layer = self.uav_3.get_layer()\n inputs = return_input(uav1_pos, uav2_pos, uav3_pos, uav1_2_collide,\n uav1_3_collide, uav2_3_collide, uav1_layer, uav2_layer,\n uav3_layer)\n\n # getting the outputs out of the controller\n outputs = ctrl.move(**inputs)\n commands = {0:[],1:[],2:[]}\n for i in range(0,3):\n if outputs['uav{}_goto'.format(i+1)] == Ca_4.Pos.A:\n self.UAV_LIST[i].update_region(self.point_1.position)\n self.show()\n if outputs['uav{}_action'.format(i+1)] == Ca_4.Command.ASC:\n self.UAV_LIST[i].move('ascend')\n elif outputs['uav{}_action'.format(i+1)] == Ca_4.Command.DES:\n self.UAV_LIST[i].move('descend')\n elif outputs['uav{}_goto'.format(i+1)] == Ca_4.Pos.A:\n commands[i] = get_commands(self.UAV_LIST[i],\n (self.point_1.position[1],\n self.point_1.position[2]))\n if len(commands[i]) > 0:\n for command in commands[i]:\n self.UAV_LIST[i].move(command)\n self.show()\n\n self.on_cleanup()\n\nif __name__ == \"__main__\" :\n theApp = App()\n theApp.on_execute()\n"
},
{
"alpha_fraction": 0.7693266868591309,
"alphanum_fraction": 0.7730673551559448,
"avg_line_length": 23.303030014038086,
"blob_id": "cd59f133530ab874cc8225f703ff9f7976c43a47",
"content_id": "8846e09cea1b99d2c82e988832d27012d8e8a09e",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Markdown",
"length_bytes": 802,
"license_type": "no_license",
"max_line_length": 217,
"num_lines": 33,
"path": "/README.md",
"repo_name": "sahabi/collision-avoidance",
"src_encoding": "UTF-8",
"text": "# Collision Avoidance Example\n\nThis is an example of using a Salty synthesized controller to control unmanned air vehicles (UAVs) following the collision avoidance protocol for UAVs in an airspace with multiple altitude layers.\n\n## Running the Example\n\n### Dependencies\n\n- Python 2\n\n- Salty\n\n- Pygame\n\nRun `sudo pip install pygame`\n\n- Jinja2\n\nRun `sudo pip install jinja2`\n\n### Steps:\n\n- Generate the controller\n\nRun `./gen_ctrl.sh`\n\nFirst, this script will generate ctrl.salt by running the script gen_ltl.py which uses ltl_temp.tl as a template. then will invoke salty on ctrl.salt and generate a concrete implementation of the controller in python.\n\n- Run the simulation\n\nRun `python simulation.py`\n\nThe script will load the controller and the environment and will run the simulation on the screen.\n"
}
] | 2 |
jlynnvaccaro/balanced_ideals_scripts
|
https://github.com/jlynnvaccaro/balanced_ideals_scripts
|
fbc0cc71c0763cf5f76cfdedfc690640ae3716ae
|
685e19a629d453119572898f6300f813319d855f
|
46652b507983968940e32b2eaf804066125e34c8
|
refs/heads/master
| 2023-07-08T05:05:20.604804 | 2021-08-11T20:29:23 | 2021-08-11T20:29:23 | 370,836,356 | 0 | 0 | null | null | null | null | null |
[
{
"alpha_fraction": 0.6085320115089417,
"alphanum_fraction": 0.6173149347305298,
"avg_line_length": 16.326086044311523,
"blob_id": "20a0922ba911abd64d920ab2f3009751d59b464b",
"content_id": "ca9b9ccc53bdb1864f21ba5f54bce9a112e6f960",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "C",
"length_bytes": 797,
"license_type": "no_license",
"max_line_length": 95,
"num_lines": 46,
"path": "/enumerate_rewrite/queue.h",
"repo_name": "jlynnvaccaro/balanced_ideals_scripts",
"src_encoding": "UTF-8",
"text": "#ifndef QUEUE_H\n#define QUEUE_H\n\n#include <stdio.h>\n#include <stdlib.h>\n\n#define QUEUE_SIZE 5000\n\n#define ERROR(condition, msg, ...) if(condition){fprintf(stderr, msg, ##__VA_ARGS__); exit(1);}\n#ifdef _DEBUG\n#define LOG(msg, ...) fprintf(stderr, msg, ##__VA_ARGS__)\n#else\n#define LOG(msg, ...)\n#endif\n\ntypedef struct {\n unsigned int start;\n unsigned int end;\n int data[QUEUE_SIZE];\n} queue_t;\n\nstatic void queue_init(queue_t *q)\n{\n q->start = q->end = 0;\n}\n\nstatic void queue_put(queue_t *q, int x)\n{\n q->data[q->end++] = x;\n q->end %= QUEUE_SIZE;\n\n ERROR(q->start == q->end, \"The queue is full! Increase QUEUE_SIZE\\n\");\n}\n\nstatic int queue_get(queue_t *q)\n{\n if(q->start == q->end)\n return -1;\n\n int result = q->data[q->start++];\n q->start %= QUEUE_SIZE;\n\n return result;\n}\n\n#endif\n"
},
{
"alpha_fraction": 0.7162500023841858,
"alphanum_fraction": 0.7225000262260437,
"avg_line_length": 31.653060913085938,
"blob_id": "44ae0244f91153ca2606da02767c8ed45b1f67ec",
"content_id": "6d5654a063f923e265e64c1455d6d52e66ff8f2c",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Makefile",
"length_bytes": 1600,
"license_type": "no_license",
"max_line_length": 228,
"num_lines": 49,
"path": "/enumerate_rewrite/Makefile",
"repo_name": "jlynnvaccaro/balanced_ideals_scripts",
"src_encoding": "UTF-8",
"text": "HEADERS=weyl.h thickenings.h queue.h bitvec.h\n\n#SPECIAL_OPTIONS=-O0 -g -D_DEBUG\n#SPECIAL_OPTIONS=-O3 -pg -funroll-loops -fno-inline\nSPECIAL_OPTIONS=-O3 -flto -funroll-loops -Winline\n\nOPTIONS=-m64 -march=native -mtune=native -std=gnu99 -D_GNU_SOURCE $(SPECIAL_OPTIONS)\nNAME=enumerate-rewrite\n\nall: enumerate graph weyldata weyldata_json\n\n$(NAME).tar.bz2: $(NAME) $(HEADERS) enumerate.c weyl.c thickenings.c\n\ttar cjhf $(NAME).tar.bz2 $(NAME)/weyldata.c $(NAME)/weyldata_json.c $(NAME)/enumerate.c $(NAME)/weyl.c $(NAME)/thickenings.c $(NAME)/weyl.h $(NAME)/thickenings.h $(NAME)/queue.h $(NAME)/bitvec.h $(NAME)/Makefile $(NAME)/graph.c\n\n$(NAME):\n\tln -s . $(NAME)\n\nweyldata_json: weyldata_json.o weyl.o thickenings.o\n\tgcc $(OPTIONS) -o weyldata_json weyldata_json.o thickenings.o weyl.o\n\nweyldata: weyldata.o weyl.o thickenings.o\n\tgcc $(OPTIONS) -o weyldata weyldata.o thickenings.o weyl.o\n\nenumerate: enumerate.o weyl.o thickenings.o\n\tgcc $(OPTIONS) -o enumerate enumerate.o thickenings.o weyl.o\n\ngraph: graph.o weyl.o\n\tgcc $(OPTIONS) -o graph graph.o weyl.o\n\nweyldata_json.o: weyldata_json.c $(HEADERS)\n\tgcc $(OPTIONS) -c weyldata_json.c\n\nweyldata.o: weyldata.c $(HEADERS)\n\tgcc $(OPTIONS) -c weyldata.c\n\nenumerate.o: enumerate.c $(HEADERS)\n\tgcc $(OPTIONS) -c enumerate.c\n\nthickenings.o: thickenings.c $(HEADERS)\n\tgcc $(OPTIONS) -c thickenings.c\n\nweyl.o: weyl.c $(HEADERS)\n\tgcc $(OPTIONS) -c weyl.c\n\ngraph.o: graph.c $(HEADERS)\n\tgcc $(OPTIONS) -c graph.c\n\nclean:\n\trm -f enumerate graph weyldata weyldata_json thickenings.o weyldata.o weyldata_json.o weyl.o enumerate.o graph.o $(NAME) $(NAME).tar.bz2\n"
},
{
"alpha_fraction": 0.5758865475654602,
"alphanum_fraction": 0.6070922017097473,
"avg_line_length": 27.239999771118164,
"blob_id": "1659db9b278bc0c5555981e453882c6793d94ab0",
"content_id": "340eb173d7821c649b99e054d9482d09f783d277",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 705,
"license_type": "no_license",
"max_line_length": 82,
"num_lines": 25,
"path": "/smooth/smooth_An.py",
"repo_name": "jlynnvaccaro/balanced_ideals_scripts",
"src_encoding": "UTF-8",
"text": "from smooth import *\n\nclass TypeA(SmoothnessChecker):\n def __init__(self,n):\n super().__init__(\"A{}\".format(n),n+1)\n self.bad_patterns = ([3,4,1,2], [4,2,3,1])\n\n def letter_to_oneline(self, c, L=None):\n \"\"\"Given a letter e.g. 'a', updates a list by applying the permutation.\"\"\"\n i = self.alphabet.index(c)\n if L is None:\n L = [x for x in range(self.n)]\n L[i],L[i+1] = L[i+1],L[i]\n return L\n\n# A3 principal balanced ideals\nA3 = TypeA(3)\nA3.schubert_smooth(\"caba\")\nA3.schubert_smooth(\"abcb\")\n\n# A5 principal balanced ideals\nA5 = TypeA(5)\nA5.schubert_smooth(\"abacdebcdbcb\")\nA5.schubert_smooth(\"deabcdabcaba\")\nA5.schubert_smooth(\"aedbcabaded\")"
},
{
"alpha_fraction": 0.5730927586555481,
"alphanum_fraction": 0.5894243717193604,
"avg_line_length": 29.722806930541992,
"blob_id": "b869c9e1102fbc732675e29604ea0566d43ccf1b",
"content_id": "1d3f317b3f1d71dcbd1ad6213da3955aa67b1f6d",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "C",
"length_bytes": 8756,
"license_type": "no_license",
"max_line_length": 245,
"num_lines": 285,
"path": "/enumerate_rewrite/enumerate.c",
"repo_name": "jlynnvaccaro/balanced_ideals_scripts",
"src_encoding": "UTF-8",
"text": "#include \"thickenings.h\"\n#include \"weyl.h\"\n#include \"queue.h\"\n\n#include <strings.h>\n#include <stdio.h>\n\nchar stringbuffer[100];\nchar stringbuffer2[100];\n\ntypedef struct {\n doublequotient_t *dq;\n int rank;\n int order;\n int positive;\n int *buffer;\n int level;\n} info_t;\n\nstatic char* alphabetize(weylgroup_element_t *e, char *str)\n{\n if(e->wordlength == 0)\n sprintf(str, \"1\");\n else {\n for(int j = 0; j < e->wordlength; j++)\n str[j] = e->word[j] + 'a';\n str[e->wordlength] = 0;\n }\n\n return str;\n}\n\nvoid balanced_thickening_callback(const bitvec_t *pos, int size, const enumeration_info_t *ei)\n{\n static long totcount = 0;\n\n if(ei->callback_data) {\n info_t *info = (info_t*)ei->callback_data;\n\n unsigned long right_invariance = FIRSTBITS(info->rank);\n unsigned long left_invariance = FIRSTBITS(info->rank);\n\n int bit1, bit2left, bit2right, left, right;\n\n for(int i = 0; i < size; i++) {\n bit1 = i < size/2 ? bv_get_bit(pos, i) : !bv_get_bit(pos, size - 1 - i);\n for(int j = 0; j < info->rank; j++) {\n\tleft = info->dq->cosets[i].min->left[j]->coset->index;\n\tright = info->dq->cosets[i].min->right[j]->coset->index;\n\tbit2left = left < size/2 ? bv_get_bit(pos, left) : !bv_get_bit(pos, size - 1 - left);\n\tbit2right = right < size/2 ? bv_get_bit(pos, right) : !bv_get_bit(pos, size - 1 - right);\n\tif(bit1 != bit2left)\n\t left_invariance &= ~BIT(j);\n\tif(bit1 != bit2right)\n\t right_invariance &= ~BIT(j);\n }\n }\n\n printf(\"%4ld left: \", totcount++);\n for(int j = 0; j < info->rank; j++)\n printf(\"%c\", left_invariance & (1 << j) ? j + 'a' : ' ');\n printf(\" right: \");\n for(int j = 0; j < info->rank; j++)\n printf(\"%c\", right_invariance & (1 << j) ? j + 'a' : ' ');\n\n if(info->buffer) {\n bitvec_t low, high;\n bv_copy(pos, &low);\n bv_negate(pos, &high);\n\n printf(\" gen: \");\n\n\n for(int i = 0; i < size/2; i++) {\n\tif(!bv_get_bit(&high, i))\n\t continue;\n\n\tprintf(\"%s \", alphabetize(info->dq->cosets[size-1-i].min, stringbuffer));\n\n\tbv_difference(&high, &ei->principal_neg[size-1-i], &high);\n\tbv_difference(&low, &ei->principal_pos[size-1-i], &low);\n }\n\n for(int i = size/2 - 1; i >= 0; i--) {\n\tif(!bv_get_bit(&low, i))\n\t continue;\n\n\tprintf(\"%s \", alphabetize(info->dq->cosets[i].min, stringbuffer));\n\n\tbv_difference(&low, &ei->principal_pos[i], &low);\n }\n }\n\n int max_length = 0;\n for(int i = 0; i < size/2; i++) {\n if(bv_get_bit(pos, i)) {\n\tif(info->dq->cosets[i].max->wordlength > max_length)\n\t max_length = info->dq->cosets[i].max->wordlength;\n } else {\n\tif(info->dq->cosets[size-i-1].max->wordlength > max_length)\n\t max_length = info->dq->cosets[size-i-1].max->wordlength;\n }\n }\n\n printf(\"\\n\");\n }\n}\n\nvoid balanced_thickening_simple_callback(const bitvec_t *pos, int size, const enumeration_info_t *ei)\n{\n long *count = (long*)ei->callback_data;\n\n if((++(*count)) % 100000000 == 0) {\n bv_print(stderr, pos, size/2);\n fprintf(stderr, \"\\n\");\n }\n}\n\nint main(int argc, const char *argv[])\n{\n semisimple_type_t type;\n unsigned long right_invariance, left_invariance;\n int rank, order, positive;\n int fixpoints;\n\n doublequotient_t *dq;\n\n const char *alphabet = \"abcdefghijklmnopqrstuvwxyz\";\n\n // read arguments\n\n ERROR(argc < 2, \"Too few arguments!\\n\\nUsage is '%s A2 A3' or '%s A2 A3 -abc -abc' with\\nA2,A3 simple Weyl factors and abc,abc left/right invariance.\\n\\nTo adjust output detail, set environment variable OUTPUT_LEVEL (1-4).\\n\",argv[0],argv[0]);\n\n // Count the number of simple factors in the semisimple Weyl group\n type.n = 0;\n for(int i = 0; i < argc - 1; i++) {\n // Skip any arguments that don't start with a letter A-G.\n if(argv[i+1][0] < 'A' || argv[i+1][0] > 'G')\n break;\n type.n++;\n }\n\n // Allocate memory, then read in the actual simple factors by letter/number, e.g. A5 is series 'A' and rank '5'. Series is A-G and the max rank is 9.\n type.factors = (simple_type_t*)malloc(type.n*sizeof(simple_type_t));\n for(int i = 0; i < type.n; i++) {\n type.factors[i].series = argv[i+1][0];\n type.factors[i].rank = argv[i+1][1] - '0';\n ERROR(argv[i+1][0] < 'A' || argv[i+1][0] > 'G' || argv[i+1][1] < '1' || argv[i+1][1] > '9', \"Arguments must be Xn with X out of A-G and n out of 1-9\\n\");\n }\n\n left_invariance = right_invariance = 0;\n // Additional command line arguments that were not factors are the left/right invariance.\n if(argc - type.n >= 3) {\n if(strcmp(argv[type.n + 1], \"-\") != 0){\n printf(\"%s\\n\",argv[type.n+1]);\n for(int i = 0; i < strlen(argv[type.n + 1]); i++)\n\t left_invariance |= (1 << (argv[type.n + 1][i] - 'a'));\n }\n if(strcmp(argv[type.n + 2], \"-\") != 0){\n for(int i = 0; i < strlen(argv[type.n + 2]); i++)\n\t right_invariance |= (1 << (argv[type.n + 2][i] - 'a'));\n }\n }\n\n // generate graph\n // dq is the Weyl graph\n\n dq = weyl_generate_bruhat(type, left_invariance, right_invariance);\n\n // print stuff\n\n // The system output_level sets the level of detail. This would be if you ran it with a debugger?\n int output_level = 2;\n if(getenv(\"OUTPUT_LEVEL\"))\n output_level = atoi(getenv(\"OUTPUT_LEVEL\"));\n\n rank = weyl_rank(type); // number of simple roots\n order = weyl_order(type); // number of Weyl group elements\n positive = weyl_positive(type); // number of positive roots\n\n if(output_level >= 1) {\n if(left_invariance) {\n printf(\"<\");\n for(int j = 0; j < rank; j++)\n\tif(left_invariance & BIT(j))\n\t fputc(alphabet[j], stdout);\n printf(\"> \\\\ \");\n }\n\n for(int i = 0; i < type.n; i++)\n printf(\"%s%c%d\", i == 0 ? \"\" : \" x \", type.factors[i].series, type.factors[i].rank);\n\n if(right_invariance) {\n printf(\" / <\");\n for(int j = 0; j < rank; j++)\n\tif(right_invariance & BIT(j))\n\t fputc(alphabet[j], stdout);\n printf(\">\");\n }\n fprintf(stdout, \"\\n\");\n\n // Top message printout\n fprintf(stdout, \"Rank: %d\\tOrder: %d\\tPositive Roots: %d\\tCosets: %d\\n\\n\", rank, order, positive, dq->count);\n }\n\n if(output_level >= 3) {\n fprintf(stdout, \"Shortest coset representatives: \\n\");\n for(int i = 0, wl = 0; i < dq->count; i++) {\n if(dq->cosets[i].min->wordlength > wl) {\n\tprintf(\"\\n\");\n\twl = dq->cosets[i].min->wordlength;\n }\n // fprintf(stdout, \"%s(%d) \", alphabetize(dq->cosets[i].min, stringbuffer), dq->cosets[i].max->wordlength);\n fprintf(stdout, \"%s \", alphabetize(dq->cosets[i].min, stringbuffer));\n }\n fprintf(stdout, \"\\n\\n\");\n }\n\n if(output_level >= 4) {\n fprintf(stdout, \"Bruhat order in graphviz format:\\n\");\n fprintf(stdout, \"digraph test123 {\\n\");\n for(int i = 0; i < dq->count; i++)\n for(doublecoset_list_t *current = dq->cosets[i].bruhat_lower; current; current = current->next)\n\tfprintf(stdout, \"%s -> %s;\\n\",\n\t\talphabetize(dq->cosets[i].min, stringbuffer),\n\t\talphabetize(current->to->min, stringbuffer2));\n fprintf(stdout, \"}\\n\\n\");\n }\n\n if(output_level >= 4) {\n fprintf(stdout, \"Opposites:\\n\");\n for(int i = 0; i < dq->count; i++)\n fprintf(stdout, \"%s <-> %s\\n\",\n\t alphabetize(dq->cosets[i].min, stringbuffer),\n\t alphabetize(dq->cosets[i].opposite->min, stringbuffer2));\n fprintf(stdout, \"\\n\");\n }\n\n // Check if there were no balanced ideals\n fixpoints = 0;\n for(int i = 0; i < dq->count; i++)\n if(dq->cosets[i].opposite == &dq->cosets[i]) {\n if(output_level >= 1) {\n\tif(fixpoints == 0)\n\t fprintf(stdout, \"No balanced ideals since the longest element fixes the following cosets:\");\n\tfprintf(stdout, \" %s\", alphabetize(dq->cosets[i].min, stringbuffer));\n }\n fixpoints++;\n }\n if(output_level >= 1 && fixpoints)\n fprintf(stdout, \"\\n\\n\");\n\n // If there were balanced ideals then print a message\n if(!fixpoints) {\n int *buffer = (int*)malloc(dq->count*sizeof(int));\n\n info_t info;\n info.dq = dq;\n info.rank = weyl_rank(type);\n info.order = weyl_order(type);\n info.positive = weyl_positive(type);\n info.buffer = buffer;\n info.level = output_level;\n\n ERROR(dq->count > 2*BV_BLOCKSIZE*BV_RANK, \"We can handle at most %d cosets. Increase BV_RANK if more is needed.\\n\", 2*BV_BLOCKSIZE*BV_RANK);\n\n long count;\n if(output_level >= 2) {\n fprintf(stdout, \"Balanced ideals:\\n\");\n count = enumerate_balanced_thickenings(dq, balanced_thickening_callback, &info);\n fprintf(stdout, \"\\n\");\n } else {\n long outputcount = 0;\n count = enumerate_balanced_thickenings(dq, balanced_thickening_simple_callback, &outputcount);\n }\n\n if(output_level >= 1)\n fprintf(stdout, \"Found %ld balanced ideal%s\\n\", count, count == 1 ? \"\" : \"s\");\n }\n // Deconstruct the dq\n weyl_destroy_bruhat(dq);\n free(type.factors);\n\n return 0;\n}\n"
},
{
"alpha_fraction": 0.5326842665672302,
"alphanum_fraction": 0.5605006814002991,
"avg_line_length": 20.787878036499023,
"blob_id": "ac4eea6ef4bd623fc1a73d1a344daba7d711cf75",
"content_id": "5344239d78cabe06966bd38b329dc3d8ec580014",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 719,
"license_type": "no_license",
"max_line_length": 44,
"num_lines": 33,
"path": "/sage/explore_connections.py",
"repo_name": "jlynnvaccaro/balanced_ideals_scripts",
"src_encoding": "UTF-8",
"text": "import utils\nimport sys\nfrom collections import defaultdict\n\n\ndef len_distr(gens, lens):\n d = defaultdict(int)\n for g in gens:\n d[len(g)] += 1\n return tuple([d[l] for l in lens])\n\nd = utils.load_json_data(sys.argv[1])\nmiddle_row_len = d[\"max_len\"]//2\n\n\n\nhist = defaultdict(int)\nhist_56 = defaultdict(int)\nfor b in d[\"balanced_ideals\"]:\n max_len = 0\n min_len = d[\"max_len\"]\n for g in b[\"gen\"]:\n len_g = len(g)\n if len_g>max_len:\n max_len = len_g\n if len_g<min_len:\n min_len = len_g\n if min_len >= 5 and max_len==6:\n tuple_56 = len_distr(b[\"gen\"],(5,6))\n hist_56[tuple_56] += 1\n hist[(min_len,max_len)]+=1\nprint(hist)\nprint(hist_56)\n"
},
{
"alpha_fraction": 0.5124943256378174,
"alphanum_fraction": 0.5229440927505493,
"avg_line_length": 43.93877410888672,
"blob_id": "11c1922112696aebb11fdaa03137e6090a6f2ad1",
"content_id": "b021a46e69aa059acde74f049b56a36bd22385a0",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 2201,
"license_type": "no_license",
"max_line_length": 122,
"num_lines": 49,
"path": "/smooth/smooth.py",
"repo_name": "jlynnvaccaro/balanced_ideals_scripts",
"src_encoding": "UTF-8",
"text": "class SmoothnessChecker():\n \"\"\"Superclass for smoothness checkers by cartan type. Defines shared functions.\"\"\"\n def __init__(self, type, n):\n \"\"\"bad_patterns should be defined in the subclass.\"\"\"\n self.alphabet=\"abcdefghijklmnopqrstuvwxyz\"\n self.n = n\n self.bad_patterns = ()\n print(\"\\n{} smoothness checker\".format(type))\n print(\"----------------------\")\n\n def letter_to_oneline(self, c, L=None):\n \"\"\"Implement in the subclasses\"\"\"\n return NotImplemented\n\n def word_to_oneline(self, word):\n \"\"\"Given a word e.g. 'abc', returns a permutation of length n representing the letter.\"\"\"\n L = [x for x in range(self.n)]\n for c in word[::-1]:\n L = self.letter_to_oneline(c,L=L)\n return L\n\n def piece_to_pattern(self, L):\n \"\"\"Given a 4-elt list, e.g. [1,5,2,8] converts that list to a pattern like [1,3,2,4]\"\"\"\n pattern = [0,0,0,0]\n for i in range(4):\n index = L.index(max(L))\n pattern[index] = 4-i\n L[index] = -1\n return pattern\n\n def schubert_smooth(self, word):\n \"\"\"Checks if a schubert cell is smooth using Lakshmibai-Sandhya pattern avoidance\n Specifically, checks whether the oneline representation of the word has any bad patterns.\"\"\"\n print(\"Checking ideal <{}>...\".format(word))\n oneline = self.word_to_oneline(word)\n for a in range(self.n-3):\n for b in range(a+1, self.n-2):\n for c in range(b+1, self.n-1):\n for d in range(c+1, self.n):\n piece = [oneline[a], oneline[b], oneline[c], oneline[d]]\n pattern = self.piece_to_pattern(piece)\n if pattern in self.bad_patterns:\n print(\" * Found a bad pattern\",pattern,\"at indices {} {} {} {} in {}\".format(a,b,c,d,oneline))\n print(\" * Balanced ideal <{}> is not smooth.\".format(word))\n return False\n # else:\n # print(\"Good pattern:\",pattern)\n print(\" Balanced ideal <{}> is smooth!\".format(word))\n return True"
},
{
"alpha_fraction": 0.5188897252082825,
"alphanum_fraction": 0.586738646030426,
"avg_line_length": 31.450000762939453,
"blob_id": "5b4101a8e03c62487b87bf6989331fcada2bdc2c",
"content_id": "f43229e78daefbc075ec18b28cc5295b9d9bc8d5",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 1297,
"license_type": "no_license",
"max_line_length": 205,
"num_lines": 40,
"path": "/smooth/smooth_Bn.py",
"repo_name": "jlynnvaccaro/balanced_ideals_scripts",
"src_encoding": "UTF-8",
"text": "from smooth import *\n\nclass TypeB(SmoothnessChecker):\n def __init__(self, n):\n super().__init__(\"B{}\".format(n),n)\n self.bad_patterns = ([-2,-1],[1,2,-3],[1,-2,-3],[-1,2,-3],[2,-1,-3],[-2,1,-3],[3,-2,1],[2,-4,3,1],[-2,-4,3,1],[3,4,1,2],[3,4,-1,2],[-3,4,1,2],[4,1,3,-2],[4,-1,3,-2],[4,2,3,1],[4,2,3,-1],[-4,2,3,1])\n\n def letter_to_oneline(self, c, L=None):\n \"\"\"Given a letter e.g. 'a', updates a list by applying the SIGNED permutation.\"\"\"\n if L is None:\n L = [x for x in range(self.n)]\n L = L[::-1]\n if c == 'a':\n L[self.n-1] = -L[self.n-1]\n else:\n i = self.alphabet.index(c)\n L[self.n-i-1],L[self.n-i] = L[self.n-i-1],L[self.n-i]\n return L[::-1]\n\nB2 = TypeB(2)\nB2.schubert_smooth(\"ba\")\nB2.schubert_smooth(\"ab\")\n\nB3 = TypeB(3)\nB3.schubert_smooth(\"bcabab\")\nB3.schubert_smooth(\"cbabcb\")\nB3.schubert_smooth(\"ababcb\")\n\nB4 = TypeB(4)\nB4.schubert_smooth(\"dcbadcbacdc\")\nB4.schubert_smooth(\"dcababcdbcb\")\nB4.schubert_smooth(\"bcdabacbabcb\")\nB4.schubert_smooth(\"abcababcdbcb\")\n\nB5 = TypeB(5)\nB5.schubert_smooth(\"edabcababcdebcdbcb\")\nB5.schubert_smooth(\"cedcebcdabcababded\")\nB5.schubert_smooth(\"dcedbcababcdebcdbcb\")\nB5.schubert_smooth(\"bcdeabacbdcababcdbcb\")\nB5.schubert_smooth(\"abcdabcababcdebcdbcb\")"
},
{
"alpha_fraction": 0.5585315227508545,
"alphanum_fraction": 0.5736935138702393,
"avg_line_length": 29.57176399230957,
"blob_id": "6d5d6b72dcdf54c655f0efb9bf5623cf859060ab",
"content_id": "6269a9fbd5f5c697518890858b700db0d004ba73",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "C",
"length_bytes": 12993,
"license_type": "no_license",
"max_line_length": 173,
"num_lines": 425,
"path": "/enumerate_rewrite/weyldata_json.c",
"repo_name": "jlynnvaccaro/balanced_ideals_scripts",
"src_encoding": "UTF-8",
"text": "#include \"thickenings.h\"\n#include \"weyl.h\"\n#include \"queue.h\"\n\n#include <strings.h>\n#include <stdio.h>\n#include <time.h>\n\nchar stringbuffer[100];\nchar stringbuffer2[100];\n\ntypedef struct {\n doublequotient_t *dq;\n int rank;\n int order;\n int positive;\n int *buffer;\n int level;\n} info_t;\n\nstatic char* alphabetize(weylgroup_element_t *e, char *str)\n{\n if(e->wordlength == 0)\n sprintf(str, \"1\");\n else {\n for(int j = 0; j < e->wordlength; j++)\n str[j] = e->word[j] + 'a';\n str[e->wordlength] = 0;\n }\n\n return str;\n}\n\nvoid json_balanced_thickening_callback(const bitvec_t *pos, int size, const enumeration_info_t *ei)\n{\n static long totcount = 0;\n\n if(ei->callback_data) {\n info_t *info = (info_t*)ei->callback_data;\n\n unsigned long right_invariance = FIRSTBITS(info->rank);\n unsigned long left_invariance = FIRSTBITS(info->rank);\n\n int bit1, bit2left, bit2right, left, right;\n\n for(int i = 0; i < size; i++) {\n bit1 = i < size/2 ? bv_get_bit(pos, i) : !bv_get_bit(pos, size - 1 - i);\n for(int j = 0; j < info->rank; j++) {\n\tleft = info->dq->cosets[i].min->left[j]->coset->index;\n\tright = info->dq->cosets[i].min->right[j]->coset->index;\n\tbit2left = left < size/2 ? bv_get_bit(pos, left) : !bv_get_bit(pos, size - 1 - left);\n\tbit2right = right < size/2 ? bv_get_bit(pos, right) : !bv_get_bit(pos, size - 1 - right);\n\tif(bit1 != bit2left)\n\t left_invariance &= ~BIT(j);\n\tif(bit1 != bit2right)\n\t right_invariance &= ~BIT(j);\n }\n }\n if (totcount!=0){\n printf(\",\\n\");\n }\n\n printf(\"{\\\"id\\\":%ld, \\\"left\\\": [\", totcount++);\n int second = 0;\n for(int j = 0; j < info->rank; j++){\n if (left_invariance & (1 << j)){\n if (second==1){\n printf(\",\");\n }\n printf(\"\\\"%c\\\"\",j + 'a');\n second = 1;\n }\n }\n printf(\"], \\\"right\\\": [\");\n second = 0;\n for(int j = 0; j < info->rank; j++){\n if (right_invariance & (1 << j) ){\n if (second==1){\n printf(\",\");\n }\n printf(\"\\\"%c\\\"\", j + 'a');\n second = 1;\n }\n }\n second = 0;\n if(info->buffer) {\n bitvec_t low, high;\n bv_copy(pos, &low);\n bv_negate(pos, &high);\n printf(\"], \\\"gen\\\": [\");\n \n for(int i = 0; i < size/2; i++) {\n\t if(!bv_get_bit(&high, i))\n\t continue;\n if (second==1)\n printf(\",\");\n\t printf(\"\\\"%s\\\"\", alphabetize(info->dq->cosets[size-1-i].min, stringbuffer));\n second = 1;\n\n\t bv_difference(&high, &ei->principal_neg[size-1-i], &high);\n\t bv_difference(&low, &ei->principal_pos[size-1-i], &low);\n }\n\n for(int i = size/2 - 1; i >= 0; i--) {\n\t if(!bv_get_bit(&low, i))\n\t continue;\n if (second==1)\n printf(\",\");\n printf(\"\\\"%s\\\"\", alphabetize(info->dq->cosets[i].min, stringbuffer));\n second = 1;\n\n\t bv_difference(&low, &ei->principal_pos[i], &low);\n }\n }\n\n int max_length = 0;\n for(int i = 0; i < size/2; i++) {\n if(bv_get_bit(pos, i)) {\n\tif(info->dq->cosets[i].max->wordlength > max_length)\n\t max_length = info->dq->cosets[i].max->wordlength;\n } else {\n\tif(info->dq->cosets[size-i-1].max->wordlength > max_length)\n\t max_length = info->dq->cosets[size-i-1].max->wordlength;\n }\n }\n\n printf(\"]}\");\n }\n}\n\nvoid json_principal_balanced_thickening_callback(const bitvec_t *pos, int i, const enumeration_info_t *ei)\n{\n static long totcount = 0;\n if (totcount>0)\n printf(\",\\n\");\n info_t *info = (info_t*)ei->callback_data;\n printf(\"{\\\"id\\\":%d, \\\"gen\\\": \", i);\n totcount++;\n printf(\"\\\"%s\\\"\", alphabetize(&info->dq->group[i], stringbuffer));\n printf(\"}\");\n}\n\nvoid json_core_callback(const bitvec_t *pos, int i, const enumeration_info_t *ei)\n{\n static long totcount = 0;\n if (totcount>0)\n printf(\",\");\n info_t *info = (info_t*)ei->callback_data;\n totcount++;\n printf(\"\\\"%s\\\"\", alphabetize(&info->dq->group[i], stringbuffer));\n}\n\nvoid json_balanced_thickening_simple_callback(const bitvec_t *pos, int size, const enumeration_info_t *ei)\n{\n long *count = (long*)ei->callback_data;\n\n if((++(*count)) % 100000000 == 0) {\n bv_print(stderr, pos, size/2);\n fprintf(stderr, \"\\n\");\n }\n}\n\nint main(int argc, const char *argv[])\n{\n semisimple_type_t type;\n int rank, order, positive;\n int fixpoints;\n const char* commands[3];\n commands[0] = \"all\";\n commands[1] = \"elts\";\n commands[2] = \"ideals\";\n\n doublequotient_t *dq;\n\n const char *alphabet = \"abcdefghijklmnopqrstuvwxyz\";\n\n // read arguments\n\n // Add back in the left/right invariance\n ERROR(argc < 3, \"Error: too few arguments!\\n\\nUsage is \\\"%s CMD A2A3\\\" with\\nCMD one of 'json','elts','ideals', or 'graphviz' and A2,A3 simple Weyl factors.\\n\\n\",argv[0]);\n\n \n // Count the number of simple factors in the semisimple Weyl group\n type.n = strlen(argv[2])/2;\n // fprintf(stdout, \"type.n=%d\\n\\n\",type.n);\n\n // Allocate memory, then read in the actual simple factors by letter/number, e.g. A5 is series 'A' and rank '5'. Series is A-G and the max rank is 9.\n type.factors = (simple_type_t*)malloc(type.n*sizeof(simple_type_t));\n int index_shift = 0;\n int new_rank = 0;\n int new_n = 0;\n for(int i = 0; i < type.n; i++) {\n if (2*i+index_shift+1>2*type.n){\n break;\n }\n type.factors[i].series = argv[2][2*i+index_shift];\n new_rank = argv[2][2*i+1+index_shift] - '0';\n while(2*i+1+index_shift<2*type.n && argv[2][2*i+1+index_shift+1]>='0' && argv[2][2*i+1+index_shift+1] <= '9'){\n new_rank = new_rank*10 + (argv[2][2*i+1+index_shift+1] - '0');\n index_shift = index_shift+1;\n }\n type.factors[i].rank = new_rank;\n new_n = i+1;\n // fprintf(stdout, \"type.factors[%d].series=%c, type.factors[%d].rank=%d\\n\\n\",i,type.factors[i].series,i,type.factors[i].rank);\n // ERROR(argv[1][2*i] < 'A' || argv[1][2*i] > 'G' || argv[1][2*i+1] < '1' || argv[1][2*i+1] > '9', \"Arguments must be Xn with X out of A-G and n out of 1-9\\n\");\n }\n type.n = new_n;\n\n rank = weyl_rank(type); // number of simple roots\n order = weyl_order(type); // number of Weyl group elements\n positive = weyl_positive(type); // number of positive roots\n\n \n // If command is graphviz, then print only the graphviz\n if(strcmp(argv[1],\"graphviz\")==0) {\n dq = weyl_generate_bruhat(type, 0, 0);\n fprintf(stdout, \"digraph %s {\\n\",argv[2]);\n for(int i = 0; i < dq->count; i++){\n for(doublecoset_list_t *current = dq->cosets[i].bruhat_lower; current; current = current->next){\n\t fprintf(stdout, \"%s -> %s;\\n\",\n\t\t alphabetize(dq->cosets[i].min, stringbuffer),\n\t\t alphabetize(current->to->min, stringbuffer2));\n }\n }\n fprintf(stdout, \"}\\n\");\n\n // Deconstruct and return\n weyl_destroy_bruhat(dq);\n free(type.factors);\n return 0;\n }\n\n // If command is anything else, start with the general JSON output.\n\n // Create the cartan matrix\n int *cartan_matrix;\n cartan_matrix = (int*)malloc(rank*rank*sizeof(int));\n weyl_cartan_matrix(type, cartan_matrix); //cartan matrix\n \n // Create the JSON-formatted timestamp\n time_t now;\n struct tm * local;\n char buffer [80];\n time(&now);\n local = localtime(&now);\n strftime(buffer,80,\"%FT%X.000Z\",local);\n\n // Output the general JSON stuff\n fprintf(stdout,\"{\");\n fprintf(stdout,\"\\\"timestamp\\\": \\\"%s\\\",\\n\",buffer); // TODO: Make it more like JSON\n fprintf(stdout,\"\\\"creator\\\": \\\"%s\\\",\\n\",argv[0]);\n fprintf(stdout,\"\\\"version\\\": \\\"0.0.1\\\",\\n\");\n fprintf(stdout, \"\\\"cartan_type\\\": \\\"%s\\\",\\n\",argv[2]);\n fprintf(stdout, \"\\\"summands\\\": [\");\n for (int i=0; i<type.n; i++){\n fprintf(stdout, \"\\\"%c%d\\\"\",type.factors[i].series,type.factors[i].rank);\n if (i<type.n-1) {\n fprintf(stdout, \",\");\n }\n }\n fprintf(stdout, \"],\\n\");\n fprintf(stdout, \"\\\"rank\\\": %d,\\n\\\"weyl_order\\\": %d,\\n\\\"max_len\\\": %d,\\n\", rank, order, positive);\n \n // Print out the cartan matrix, formatted nicely\n fprintf(stdout, \"\\\"cartan_matrix\\\":\\n[\");\n for (int i=0; i<rank; i++) {\n fprintf(stdout, \"[ \");\n for (int j=0; j<rank; j++){\n // Make the spacing nice\n if (cartan_matrix[rank*i+j]>=0) {\n fprintf(stdout, \" \");\n }\n fprintf(stdout, \"%d\",cartan_matrix[rank*i+j]);\n if (j<rank-1) {\n fprintf(stdout, \", \");\n } else {\n fprintf(stdout, \" \");\n }\n }\n if (i<rank-1){\n fprintf(stdout, \"],\\n \");\n } else {\n fprintf(stdout, \"]]\");\n }\n }\n\n // print out weylgroup elements\n if (strcmp(argv[1],\"elts\")==0) {\n weylgroup_t *wgroup = weyl_generate(type); // TODO: This makes the code take much longer to run\n fprintf(stdout, \",\\n\\\"elements\\\": [\");\n for (int i=0; i<order; i++){\n if (i!= 0){\n fprintf(stdout, \", \");\n }\n fprintf(stdout, \"\\\"%s\\\"\", alphabetize(&wgroup->elements[i], stringbuffer));\n }\n fprintf(stdout,\"]\");\n weyl_destroy(wgroup);\n }\n \n // Print out the principal balanced ideals\n if (strcmp(argv[1],\"principal\")==0) {\n dq = weyl_generate_bruhat(type, 0, 0);\n // Check if there were no balanced ideals\n fixpoints = 0;\n for(int i = 0; i < dq->count; i++)\n if(dq->cosets[i].opposite == &dq->cosets[i]) {\n if(fixpoints == 0)\n fprintf(stdout, \"No balanced ideals since the longest element fixes the following cosets:\");\n fprintf(stdout, \" %s\", alphabetize(dq->cosets[i].min, stringbuffer));\n fixpoints++;\n }\n if(fixpoints)\n fprintf(stdout, \"\\n\\n\");\n\n // If there were balanced ideals then print a message\n if(!fixpoints) {\n int *buffer = (int*)malloc(dq->count*sizeof(int));\n\n info_t info;\n info.dq = dq;\n info.rank = weyl_rank(type);\n info.order = weyl_order(type);\n info.positive = weyl_positive(type);\n info.buffer = buffer;\n\n ERROR(dq->count > 2*BV_BLOCKSIZE*BV_RANK, \"We can handle at most %d cosets. Increase BV_RANK if more is needed.\\n\", 2*BV_BLOCKSIZE*BV_RANK);\n\n long count;\n fprintf(stdout, \",\\n\\\"principal_balanced_ideals\\\":\\n[\");\n count = enumerate_principal_balanced_thickenings(dq, json_principal_balanced_thickening_callback, &info);\n fprintf(stdout, \"],\");\n\n fprintf(stdout, \"\\n\\\"num_principal_balanced_ideals\\\":%ld\", count);\n }\n // Deconstruct the dq\n weyl_destroy_bruhat(dq);\n }\n\n // Print out the principal balanced ideals\n if (strcmp(argv[1],\"core\")==0) {\n dq = weyl_generate_bruhat(type, 0, 0);\n // Check if there were no balanced ideals\n fixpoints = 0;\n for(int i = 0; i < dq->count; i++)\n if(dq->cosets[i].opposite == &dq->cosets[i]) {\n if(fixpoints == 0)\n fprintf(stdout, \"No balanced ideals since the longest element fixes the following cosets:\");\n fprintf(stdout, \" %s\", alphabetize(dq->cosets[i].min, stringbuffer));\n fixpoints++;\n }\n if(fixpoints)\n fprintf(stdout, \"\\n\\n\");\n\n // If there were balanced ideals then print a message\n if(!fixpoints) {\n int *buffer = (int*)malloc(dq->count*sizeof(int));\n\n info_t info;\n info.dq = dq;\n info.rank = weyl_rank(type);\n info.order = weyl_order(type);\n info.positive = weyl_positive(type);\n info.buffer = buffer;\n\n ERROR(dq->count > 2*BV_BLOCKSIZE*BV_RANK, \"We can handle at most %d cosets. Increase BV_RANK if more is needed.\\n\", 2*BV_BLOCKSIZE*BV_RANK);\n\n long count;\n fprintf(stdout, \",\\n\\\"core\\\": [\");\n count = enumerate_core(dq, json_core_callback, &info);\n fprintf(stdout, \"],\");\n\n fprintf(stdout, \"\\n\\\"num_core\\\":%ld\", count);\n }\n // Deconstruct the dq\n weyl_destroy_bruhat(dq);\n }\n\n\n\n // Print out the balanced ideals\n if (strcmp(argv[1],\"ideals\")==0) {\n dq = weyl_generate_bruhat(type, 0, 0);\n // Check if there were no balanced ideals\n fixpoints = 0;\n for(int i = 0; i < dq->count; i++)\n if(dq->cosets[i].opposite == &dq->cosets[i]) {\n if(fixpoints == 0)\n fprintf(stdout, \"No balanced ideals since the longest element fixes the following cosets:\");\n fprintf(stdout, \" %s\", alphabetize(dq->cosets[i].min, stringbuffer));\n fixpoints++;\n }\n if(fixpoints)\n fprintf(stdout, \"\\n\\n\");\n\n // If there were balanced ideals then print a message\n if(!fixpoints) {\n int *buffer = (int*)malloc(dq->count*sizeof(int));\n\n info_t info;\n info.dq = dq;\n info.rank = weyl_rank(type);\n info.order = weyl_order(type);\n info.positive = weyl_positive(type);\n info.buffer = buffer;\n\n ERROR(dq->count > 2*BV_BLOCKSIZE*BV_RANK, \"We can handle at most %d cosets. Increase BV_RANK if more is needed.\\n\", 2*BV_BLOCKSIZE*BV_RANK);\n\n long count;\n fprintf(stdout, \",\\n\\\"balanced_ideals\\\":\\n[\");\n count = enumerate_balanced_thickenings(dq, json_balanced_thickening_callback, &info);\n fprintf(stdout, \"],\");\n\n fprintf(stdout, \"\\n\\\"num_balanced_ideals\\\":%ld\", count);\n }\n // Deconstruct the dq\n weyl_destroy_bruhat(dq);\n }\n fprintf(stdout, \"\\n}\\n\");\n\n // free memory back\n free(type.factors);\n\n return 0;\n}\n"
},
{
"alpha_fraction": 0.5958729982376099,
"alphanum_fraction": 0.6076058149337769,
"avg_line_length": 28.726829528808594,
"blob_id": "c186aa292418756831740db6c3d2c76ebab8ea54",
"content_id": "aa036bd70056da6576cba14a1ca15380fcc06318",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "C",
"length_bytes": 24376,
"license_type": "no_license",
"max_line_length": 181,
"num_lines": 820,
"path": "/enumerate_rewrite/weyl.c",
"repo_name": "jlynnvaccaro/balanced_ideals_scripts",
"src_encoding": "UTF-8",
"text": "#include \"weyl.h\"\n#include \"queue.h\"\n\n#include <stdio.h>\n#include <memory.h>\n#include <stdlib.h>\n\n#define BIT(n) ((uint64_t)1 << (n))\n\ntypedef struct {\n weylid_t id;\n int position;\n} weylid_lookup_t;\n\nstatic void generate_left_and_ids(semisimple_type_t type, weylgroup_element_t *group);\nstatic int search(const void *key, const void *base, size_t nmem, size_t size, int (*compar) (const void *, const void *, void *), void *arg);\nstatic int compare_root_vectors(int rank, const int *x, const int *y);\nstatic int compare_root_vectors_qsort(const void *x, const void *y, void *arg);\nstatic int compare_weylid(const void *x, const void *y);\nstatic int compare_weylid_lookup(const void *x, const void *y);\nstatic int lookup_id(weylid_t id, weylid_lookup_t *list, int len);\nstatic weylid_t multiply_generator(int s, weylid_t w, const int *simple, const int *mapping, int rank, int positive);\nstatic void reflect_root_vector(const int *cartan, int rank, int i, int *old, int *new);\nstatic weylgroup_element_t* apply_word(int *word, int len, weylgroup_element_t *current);\nstatic weylgroup_element_t* apply_word_reverse(int *word, int len, weylgroup_element_t *current);\n\n/******** generate_left_and_ids and a pile of helper functions **************/\n\nstatic void generate_left_and_ids(semisimple_type_t type, weylgroup_element_t *group)\n{\n int rank = weyl_rank(type);\n int order = weyl_order(type);\n int positive = weyl_positive(type);\n\n queue_t queue;\n int current;\n int roots_known, elements, length_elements, nextids_count;\n int *cartan_matrix;\n int *root_vectors;\n int *vector;\n int *simple_roots;\n int *root_mapping;\n weylid_t *ids, *edges, *nextids;\n weylid_lookup_t *lookup;\n\n // allocate temporary stuff\n\n cartan_matrix = (int*)malloc(rank*rank *sizeof(int));\n root_vectors = (int*)malloc(2*positive*rank*sizeof(int));\n vector = (int*)malloc(rank *sizeof(int));\n root_mapping = (int*)malloc(positive*rank *sizeof(int));\n simple_roots = (int*)malloc(rank *sizeof(int));\n ids = (weylid_t*)malloc(order *sizeof(weylid_t));\n edges = (weylid_t*)malloc(rank*order *sizeof(weylid_t));\n nextids = (weylid_t*)malloc(rank*order *sizeof(weylid_t));\n lookup = (weylid_lookup_t*)malloc(order *sizeof(weylid_lookup_t));\n\n // get all information on the cartan type\n LOG(\"Get Cartan matrix.\\n\");\n\n weyl_cartan_matrix(type, cartan_matrix);\n\n // enumerate roots, first the simple ones, then all others by reflecting\n LOG(\"Enumerate roots.\\n\");\n\n memset(root_vectors, 0, 2*positive*rank*sizeof(int));\n roots_known = 0;\n\n queue_init(&queue);\n for(int i = 0; i < rank; i++) {\n root_vectors[rank*i + i] = 1; // (r_i)_j = delta_ij\n queue_put(&queue, i);\n roots_known++;\n }\n\n while((current = queue_get(&queue)) != -1) {\n for(int i = 0; i < rank; i++) {\n reflect_root_vector(cartan_matrix, rank, i, &root_vectors[rank*current], vector);\n int j;\n for(j = 0; j < roots_known; j++)\n\tif(compare_root_vectors(rank, &root_vectors[rank*j], vector) == 0)\n\t break;\n if(j == roots_known) {\n\tmemcpy(&root_vectors[rank*roots_known], vector, rank*sizeof(int));\n\tqueue_put(&queue, roots_known);\n\troots_known++;\n }\n }\n }\n\n ERROR(roots_known != 2*positive, \"Number of roots does not match!\\n\");\n\n // sort roots and restrict to positives\n LOG(\"Sort roots.\\n\");\n\n qsort_r(root_vectors, 2*positive, rank*sizeof(int), compare_root_vectors_qsort, &rank);\n memcpy(root_vectors, &root_vectors[positive*rank], positive*rank*sizeof(int)); // this just copies the second part of the list onto the first; source and destination are disjoint!\n\n // generate root_mapping, which gives the action of root reflections on positive roots (-1 if result is not a positive root)\n LOG(\"Compute root reflections.\\n\");\n\n for(int i = 0; i < positive; i++) {\n for(int j = 0; j < rank; j++) {\n reflect_root_vector(cartan_matrix, rank, j, &root_vectors[rank*i], vector);\n root_mapping[i*rank+j] =\n\tsearch(vector, root_vectors, positive, rank*sizeof(int), compare_root_vectors_qsort, &rank);\n }\n }\n\n // find simple roots in the list\n LOG(\"Find simple roots.\\n\");\n\n for(int i = 0; i < rank; i++) {\n memset(vector, 0, rank*sizeof(int));\n vector[i] = 1;\n simple_roots[i] = search(vector, root_vectors, positive, rank*sizeof(int), compare_root_vectors_qsort, &rank);\n }\n\n // enumerate weyl group elements using difference sets\n LOG(\"Enumerate Weyl group elements.\\n\");\n\n nextids[0] = 0;\n nextids_count = 1;\n elements = 0;\n for(int len = 0; len <= positive; len++) {\n length_elements = 0;\n\n // find unique ids in edges added in the last iteration\n qsort(nextids, nextids_count, sizeof(weylid_t), compare_weylid);\n for(int i = 0; i < nextids_count; i++)\n if(i == 0 || nextids[i] != nextids[i-1])\n\tids[elements + length_elements++] = nextids[i];\n\n // add new edges\n nextids_count = 0;\n for(int i = elements; i < elements + length_elements; i++)\n for(int j = 0; j < rank; j++) {\n\tedges[i*rank+j] = multiply_generator(j, ids[i], simple_roots, root_mapping, rank, positive);\n\tif(!(ids[i] & BIT(simple_roots[j]))) // the new element is longer then the old one\n\t nextids[nextids_count++] = edges[i*rank+j];\n }\n\n elements += length_elements;\n }\n\n // translate the ids to list positions (i.e. local continuous ids)\n LOG(\"Reorder Weyl group elements.\\n\");\n\n for(int i = 0; i < order; i++) {\n lookup[i].id = ids[i];\n lookup[i].position = i;\n }\n qsort(lookup, order, sizeof(weylid_lookup_t), compare_weylid_lookup);\n\n // fill in results\n LOG(\"Compute left multiplication.\\n\");\n\n for(int i = 0; i < order; i++) {\n group[i].id = ids[i];\n for(int j = 0; j < rank; j++)\n group[i].left[j] = group + lookup_id(edges[i*rank+j], lookup, order);\n }\n\n // free temporary stuff\n\n free(cartan_matrix);\n free(root_vectors);\n free(vector);\n free(root_mapping);\n free(simple_roots);\n free(ids);\n free(edges);\n free(nextids);\n free(lookup);\n}\n\n// glibc search function, but with user pointer and returning index (or -1 if not found)\nstatic int search(const void *key, const void *base, size_t nmemb, size_t size, int (*compar) (const void *, const void *, void *), void *arg)\n{\n size_t l, u, idx;\n const void *p;\n int comparison;\n\n l = 0;\n u = nmemb;\n while (l < u) {\n idx = (l + u) / 2;\n p = (void *) (((const char *) base) + (idx * size));\n comparison = (*compar) (key, p, arg);\n if (comparison < 0)\n u = idx;\n else if (comparison > 0)\n l = idx + 1;\n else\n return idx;\n }\n\n return -1;\n}\n\n// maybe we want a different ordering here?\nstatic int compare_root_vectors(int rank, const int *x, const int *y)\n{\n for(int i = 0; i < rank; i++)\n if(x[i] != y[i])\n return x[i] - y[i];\n\n return 0;\n}\n\nstatic int compare_root_vectors_qsort(const void *x, const void *y, void *arg)\n{\n return compare_root_vectors(*((int*)arg), x, y);\n}\n\nstatic int compare_weylid(const void *x, const void *y)\n{\n weylid_t u = *((weylid_t*)x);\n weylid_t v = *((weylid_t*)y);\n\n return u > v ? 1 : u < v ? -1 : 0;\n}\n\nstatic int compare_weylid_lookup(const void *x, const void *y)\n{\n weylid_t u = ((weylid_lookup_t*)x)->id;\n weylid_t v = ((weylid_lookup_t*)y)->id;\n\n return u > v ? 1 : u < v ? -1 : 0;\n}\n\nstatic int lookup_id(weylid_t id, weylid_lookup_t *list, int len)\n{\n weylid_lookup_t key;\n key.id = id;\n weylid_lookup_t *p = (weylid_lookup_t*)bsearch(&key, list, len, sizeof(weylid_lookup_t), compare_weylid_lookup);\n return p->position;\n}\n\nstatic weylid_t multiply_generator(int s, weylid_t w, const int* simple, const int* mapping, int rank, int positive)\n{\n weylid_t sw = 0;\n\n for(int i = 0; i < positive; i++) {\n if(w & BIT(i))\n if(mapping[i*rank+s] != -1)\n\tsw |= BIT(mapping[i*rank+s]);\n }\n\n if(w & BIT(simple[s]))\n return sw;\n else\n return sw | BIT(simple[s]);\n}\n\nstatic void reflect_root_vector(const int *cartan, int rank, int i, int *old, int *new)\n{\n memcpy(new, old, rank*sizeof(int));\n for(int j = 0; j < rank; j++)\n new[i] -= cartan[i*rank + j]*old[j];\n}\n\n/************* Weyl group infos ************************/\n\nstatic int weyl_exists(simple_type_t type)\n{\n if(type.series < 'A' || type.series > 'G' || type.rank < 1 ||\n type.series == 'B' && type.rank < 2 ||\n type.series == 'C' && type.rank < 2 ||\n type.series == 'D' && type.rank < 3 ||\n type.series == 'E' && type.rank != 6 && type.rank != 7 && type.rank != 8 ||\n type.series == 'F' && type.rank != 4 ||\n type.series == 'G' && type.rank != 2)\n return 0;\n else\n return 1;\n}\n\nint weyl_rank(semisimple_type_t type)\n{\n // Total rank of the Weyl group, which is the sum of the factor ranks.\n int rank = 0;\n for(int i = 0; i < type.n; i++)\n rank += type.factors[i].rank;\n return rank;\n}\n\nint weyl_order(semisimple_type_t type)\n{\n // Total order of the Weyl group, which is the product of the orders of the factors. E.g. order(An) = (n+1)!, order(Bn) = (n+1)! * 2^(n+1)\n int order = 1;\n for(int i = 0; i < type.n; i++) {\n ERROR(!weyl_exists(type.factors[i]), \"A Weyl group of type %c%d does not exist!\\n\", type.factors[i].series, type.factors[i].rank);\n\n switch(type.factors[i].series) {\n case 'A':\n for(int j = 1; j <= type.factors[i].rank + 1; j++)\n\torder *= j;\n break;\n\n case 'B': case 'C':\n for(int j = 1; j <= type.factors[i].rank; j++)\n\torder *= 2*j;\n break;\n\n case 'D':\n for(int j = 2; j <= type.factors[i].rank; j++)\n\torder *= 2*j;\n break;\n\n case 'E':\n if(type.factors[i].rank == 6)\n\torder *= 51840;\n else if(type.factors[i].rank == 7)\n\torder *= 2903040;\n else if(type.factors[i].rank == 8)\n\torder *= 696729600;\n break;\n\n case 'F':\n order *= 1152;\n break;\n\n case 'G':\n order *= 12;\n break;\n }\n }\n\n return order;\n}\n\nint weyl_positive(semisimple_type_t type)\n{\n // Maximum length, which is the sum of the maximum length of the summands. This is the length for w_0 in W.\n int positive = 0;\n\n for(int i = 0; i < type.n; i++) {\n ERROR(!weyl_exists(type.factors[i]), \"A Weyl group of type %c%d does not exist!\\n\", type.factors[i].series, type.factors[i].rank);\n\n switch(type.factors[i].series) {\n case 'A':\n positive += (type.factors[i].rank * (type.factors[i].rank + 1)) / 2;\n break;\n\n case 'B': case 'C':\n positive += type.factors[i].rank * type.factors[i].rank;\n break;\n\n case 'D':\n positive += type.factors[i].rank * (type.factors[i].rank - 1);\n break;\n\n case 'E':\n if(type.factors[i].rank == 6)\n\tpositive += 36;\n else if(type.factors[i].rank == 7)\n\tpositive += 63;\n else if(type.factors[i].rank == 8)\n\tpositive += 120;\n break;\n\n case 'F':\n positive += 24;\n break;\n\n case 'G':\n positive += 6;\n break;\n }\n }\n\n return positive;\n}\n\nint weyl_opposition(semisimple_type_t type, int simple_root)\n{\n int offset = 0;\n int factor = 0;\n int r, iota_r;\n\n for(factor = 0; factor < type.n; factor++)\n if(simple_root < offset + type.factors[factor].rank)\n break;\n else\n offset += type.factors[factor].rank;\n r = simple_root - offset;\n\n ERROR(!weyl_exists(type.factors[factor]), \"A Weyl group of type %c%d does not exist!\\n\", type.factors[factor].series, type.factors[factor].rank);\n\n switch(type.factors[factor].series) {\n case 'B': case 'C': case 'F': case 'G':\n iota_r = r;\n break;\n\n case 'A':\n iota_r = type.factors[factor].rank - 1 - r;\n break;\n\n case 'D':\n if(type.factors[factor].rank % 2 == 0)\n iota_r = r;\n else\n iota_r = r == 0 ? 1 : r == 1 ? 0 : r;\n break;\n\n case 'E':\n if(type.factors[factor].rank != 6)\n iota_r = r;\n else\n iota_r = r == 2 || r == 3 ? r : 5 - r;\n break;\n }\n\n return iota_r + offset;\n}\n\nvoid weyl_cartan_matrix(semisimple_type_t type, int *m)\n{\n int offset = 0;\n int rank = weyl_rank(type);\n\n int **A = (int**)malloc(rank*sizeof(int*));\n\n memset(m, 0, rank*rank*sizeof(int));\n for(int i = 0; i < rank; i++)\n m[i*rank+i] = 2;\n\n for(int k = 0; k < type.n; k++) {\n ERROR(!weyl_exists(type.factors[k]), \"A Weyl group of type %c%d does not exist!\\n\", type.factors[k].series, type.factors[k].rank);\n\n for(int i = 0; i < type.factors[k].rank; i++) // A is the submatrix corresponding to the current simple factor\n A[i] = &m[(i+offset)*rank + offset];\n\n for(int i = 1; i < type.factors[k].rank; i++) {\n A[i][i-1] = -1;\n A[i-1][i] = -1;\n }\n\n switch(type.factors[k].series) {\n case 'A':\n break;\n case 'B':\n A[0][1] = -2;\n break;\n case 'C':\n A[1][0] = -2;\n break;\n case 'D':\n A[0][1] = A[1][0] = 0;\n A[0][2] = A[2][0] = -1;\n break;\n case 'E':\n A[1][2] = A[2][1] = 0;\n A[1][3] = A[3][1] = -1;\n break;\n case 'F':\n A[2][1] = -2;\n break;\n case 'G':\n A[1][0] = -3;\n break;\n }\n\n offset += type.factors[k].rank;\n }\n\n free(A);\n}\n\n/************ weyl_generate etc. ********************/\n\nstatic weylgroup_element_t* apply_word(int *word, int len, weylgroup_element_t *current)\n{\n for(int k = len - 1; k >= 0; k--) // apply group element from right to left\n current = current->left[word[k]];\n\n return current;\n}\n\nstatic weylgroup_element_t* apply_word_reverse(int *word, int len, weylgroup_element_t *current)\n{\n for(int k = 0; k < len; k++) // apply group element from left to right (i.e. apply inverse)\n current = current->left[word[k]];\n\n return current;\n}\n\nweylgroup_t *weyl_generate(semisimple_type_t type)\n{\n int rank = weyl_rank(type);\n int order = weyl_order(type);\n int positive = weyl_positive(type);\n\n ERROR(positive > 64, \"We can't handle root systems with more than 64 positive roots!\\n\");\n\n // allocate result\n\n weylgroup_element_t *group = (weylgroup_element_t*)malloc(order*sizeof(weylgroup_element_t));\n weylgroup_t *result = malloc(sizeof(weylgroup_t));\n result->type = type;\n result->elements = group;\n result->lists = (weylgroup_element_t**)malloc(2*order*rank*sizeof(weylgroup_element_t*));\n\n for(int i = 0; i < order; i++) {\n group[i].left = result->lists + 2*i*rank;\n group[i].right = result->lists + (2*i+1)*rank;\n group[i].coset = (doublecoset_t*)0;\n group[i].index = i;\n }\n\n // the main part\n LOG(\"Start generating Weyl group.\\n\");\n\n generate_left_and_ids(type, group);\n\n // word length is just the number of 1s in the binary id\n LOG(\"Find word lengths.\\n\");\n\n for(int i = 0; i < order; i++) {\n group[i].wordlength = 0;\n for(int j = 0; j < positive; j++)\n if(group[i].id & BIT(j))\n\tgroup[i].wordlength++;\n }\n\n // allocate letters\n\n int total_wordlength = 0;\n for(int i = 0; i < order; i++)\n total_wordlength += group[i].wordlength;\n result->letters = (int*)malloc(total_wordlength*sizeof(int));\n total_wordlength = 0;\n for(int i = 0; i < order; i++) {\n group[i].word = result->letters + total_wordlength;\n total_wordlength += group[i].wordlength;\n }\n\n // find shortest words (using that the elements are already ordered by word length)\n LOG(\"Find shortest words.\\n\");\n\n memset(result->letters, -1, total_wordlength*sizeof(int));\n for(int i = 0; i < order - 1; i++) {\n weylgroup_element_t *this = &group[i];\n for(int j = 0; j < rank; j++) {\n weylgroup_element_t *that = group[i].left[j];\n if(that->wordlength > this->wordlength && that->word[0] == -1) {\n\tmemcpy(that->word + 1, this->word, this->wordlength*sizeof(int));\n\tthat->word[0] = j;\n }\n }\n }\n\n // generate right edges\n LOG(\"Compute right multiplication.\\n\");\n\n for(int i = 0; i < order; i++)\n for(int j = 0; j < rank; j++)\n group[i].right[j] = apply_word(group[i].word, group[i].wordlength, group[0].left[j]);\n\n // find opposites\n LOG(\"Find opposites.\\n\");\n\n weylgroup_element_t *longest = &group[order-1];\n for(int i = 0; i < order; i++)\n group[i].opposite = apply_word(longest->word, longest->wordlength, &group[i]);\n\n // check for root reflections\n LOG(\"Find root reflections.\\n\");\n\n for(int i = 0; i < order; i++)\n group[i].is_root_reflection = 0;\n for(int i = 0; i < order; i++)\n for(int j = 0; j < rank; j++) // we want to calculate word^{-1} * j * word; this is a root reflection\n apply_word_reverse(group[i].word, group[i].wordlength, group[i].left[j]) -> is_root_reflection = 1; // TODO: What does this code do?\n\n return result;\n}\n\nvoid weyl_destroy(weylgroup_t *group)\n{\n free(group->elements);\n free(group->lists);\n free(group->letters);\n free(group);\n}\n\ndoublequotient_t *weyl_generate_bruhat(semisimple_type_t type, int left_invariance, int right_invariance)\n{\n int rank = weyl_rank(type);\n int order = weyl_order(type);\n int positive = weyl_positive(type);\n int count;\n\n int is_minimum, is_maximum;\n\n weylgroup_t *wgroup = weyl_generate(type);\n weylgroup_element_t *group = wgroup->elements;\n doublecoset_t *cosets;\n\n for(int i = 0; i < rank; i++) {\n int oppi = weyl_opposition(type, i);\n if(left_invariance & BIT(i) && !(left_invariance & BIT(oppi)) ||\n left_invariance & BIT(oppi) && !(left_invariance & BIT(i)))\n ERROR(1, \"The specified left invariance is not invariant under the opposition involution!\\n\");\n }\n\n doublequotient_t *result = (doublequotient_t*)malloc(sizeof(doublequotient_t));\n result->type = type;\n result->left_invariance = left_invariance;\n result->right_invariance = right_invariance;\n result->group = wgroup->elements;\n result->grouplists = wgroup->lists;\n result->groupletters = wgroup->letters;\n\n free(wgroup); // dissolved in result and not needed anymore\n\n LOG(\"Count cosets.\\n\"); // count cosets by finding the minimum length element in every coset\n\n count = 0;\n for(int i = 0; i < order; i++) {\n is_minimum = 1;\n for(int j = 0; j < rank; j++)\n if(left_invariance & BIT(j) && group[i].left[j]->wordlength < group[i].wordlength ||\n\t right_invariance & BIT(j) && group[i].right[j]->wordlength < group[i].wordlength)\n\tis_minimum = 0;\n if(is_minimum)\n count++;\n }\n result->count = count;\n\n // alloc more stuff\n\n cosets = result->cosets = (doublecoset_t*)malloc(count*sizeof(doublecoset_t));\n for(int i = 0; i < count; i++) {\n cosets[i].bruhat_lower = cosets[i].bruhat_higher = (doublecoset_list_t*)0;\n }\n result->lists = (doublecoset_list_t*)malloc(2*count*positive*sizeof(doublecoset_list_t)); // 2 times, for bruhat lower and higher\n\n LOG(\"Find minimal length elements in cosets.\\n\"); // basically same code as above\n\n count = 0;\n for(int i = 0; i < order; i++) {\n is_minimum = 1;\n for(int j = 0; j < rank; j++)\n if(left_invariance & BIT(j) && group[i].left[j]->wordlength < group[i].wordlength ||\n\t right_invariance & BIT(j) && group[i].right[j]->wordlength < group[i].wordlength)\n\tis_minimum = 0;\n if(is_minimum) {\n cosets[count].min = &group[i];\n group[i].coset = &cosets[count];\n count++;\n }\n }\n\n LOG(\"Generate quotient map.\\n\");\n\n for(int i = 0; i < order; i++) {\n for(int j = 0; j < rank; j++) {\n if(left_invariance & BIT(j) && group[i].left[j]->wordlength > group[i].wordlength)\n\tgroup[i].left[j]->coset = group[i].coset;\n if(right_invariance & BIT(j) && group[i].right[j]->wordlength > group[i].wordlength)\n\tgroup[i].right[j]->coset = group[i].coset;\n }\n }\n\n LOG(\"Find maximal length elements.\\n\");\n\n for(int i = 0; i < order; i++) {\n is_maximum = 1;\n for(int j = 0; j < rank; j++)\n if(left_invariance & BIT(j) && group[i].left[j]->wordlength > group[i].wordlength ||\n\t right_invariance & BIT(j) && group[i].right[j]->wordlength > group[i].wordlength)\n\tis_maximum = 0;\n if(is_maximum) {\n group[i].coset->max = &group[i];\n }\n }\n\n LOG(\"Find opposites.\\n\");\n\n for(int i = 0; i < count; i++)\n cosets[i].opposite = cosets[i].min->opposite->coset;\n\n LOG(\"Sort opposites.\\n\");\n\n int *old2newindices = (int*)malloc(count*sizeof(int));\n int *new2oldindices = (int*)malloc(count*sizeof(int));\n\n // give the cosets some temporary indices\n for(int i = 0; i < count; i++)\n cosets[i].index = i;\n\n // generate a nice ordering, where element j is opposite to n-j, except the self-opposite ones, which are in the middle\n int j = 0;\n for(int i = 0; i < count; i++)\n if(i < cosets[i].opposite->index) {\n old2newindices[i] = j;\n old2newindices[cosets[i].opposite->index] = count-1-j;\n j++;\n }\n for(int i = 0; i < count; i++)\n if(i == cosets[i].opposite->index)\n old2newindices[i] = j++;\n\n for(int i = 0; i < count; i++)\n new2oldindices[old2newindices[i]] = i;\n\n // rewrite everything in the new ordering\n doublecoset_t *oldcosets = (doublecoset_t*)malloc(count*sizeof(doublecoset_t));\n memcpy(oldcosets, cosets, count*sizeof(doublecoset_t));\n for(int i = 0; i < count; i++) {\n cosets[i].min = oldcosets[new2oldindices[i]].min;\n cosets[i].max = oldcosets[new2oldindices[i]].max;\n cosets[i].opposite = cosets + old2newindices[oldcosets[new2oldindices[i]].opposite->index];\n // cosets[i].bruhat_lower = oldcosets[new2oldindices[i]].bruhat_lower;\n // cosets[i].bruhat_higher = oldcosets[new2oldindices[i]].bruhat_higher;\n // for(doublecoset_list_t *current = cosets[i].bruhat_lower; current; current = current -> next)\n // current->to = &cosets[old2newindices[current->to->index]];\n // for(doublecoset_list_t *current = cosets[i].bruhat_higher; current; current = current -> next)\n // current->to = &cosets[old2newindices[current->to->index]];\n }\n for(int i = 0; i < order; i++)\n group[i].coset = old2newindices[group[i].coset->index] + cosets;\n for(int i = 0; i < count; i++) // do this in the end, so we can use the \"index\" attribute before to translate pointers to indices\n cosets[i].index = i;\n\n free(old2newindices);\n free(new2oldindices);\n free(oldcosets);\n\n LOG(\"Find bruhat order.\\n\");\n\n int edgecount = 0;\n for(int i = 0; i < order; i++) {\n if(group[i].is_root_reflection) {\n for(int j = 0; j < count; j++) {\n\tweylgroup_element_t *this = cosets[j].min;\n\tweylgroup_element_t *that = apply_word(group[i].word, group[i].wordlength, cosets[j].min);\n\tif(this->wordlength > that->wordlength) { // this is higher in bruhat order than that\n\t doublecoset_list_t *new = &result->lists[edgecount++];\n\t new->next = this->coset->bruhat_lower;\n\t this->coset->bruhat_lower = new;\n\t new->to = that->coset;\n\t}\n }\n }\n }\n\n LOG(\"Perform transitive reduction.\\n\"); // eliminating redudant order relations\n\n doublecoset_t *origin;\n doublecoset_list_t *current;\n doublecoset_list_t *prev;\n queue_t queue;\n int idx;\n int *seen = malloc(count*sizeof(int));\n for(int i = 0; i < count; i++) {\n memset(seen, 0, count*sizeof(int));\n queue_init(&queue);\n\n for(int len = 1; len <= cosets[i].min->wordlength; len++) {\n\n // remove all edges originating from i of length len which connect to something already seen using shorter edges\n origin = &cosets[i];\n prev = (doublecoset_list_t*)0;\n\n for(current = origin->bruhat_lower; current; current = current->next) {\n\tif(origin->min->wordlength - current->to->min->wordlength != len) {\n\t prev = current;\n\t} else if(seen[current->to->index]) {\n\t if(prev)\n\t prev->next = current->next;\n\t else\n\t origin->bruhat_lower = current->next;\n\t} else {\n\t prev = current;\n\t seen[current->to->index] = 1;\n\t queue_put(&queue, current->to->index);\n\t}\n }\n\n // see which nodes we can reach using only edges up to length len, mark them as seen\n while((idx = queue_get(&queue)) != -1) {\n\tcurrent = cosets[idx].bruhat_lower;\n\tfor(current = cosets[idx].bruhat_lower; current; current = current->next) {\n\t if(!seen[current->to->index]) {\n\t seen[current->to->index] = 1;\n\t queue_put(&queue, current->to->index);\n\t }\n\t}\n }\n }\n }\n\n free(seen);\n\n LOG(\"Revert bruhat order.\\n\");\n\n for(int i = 0; i < count; i++) {\n for(current = cosets[i].bruhat_lower; current; current = current->next) {\n doublecoset_list_t *new = &result->lists[edgecount++];\n new->to = &cosets[i];\n new->next = current->to->bruhat_higher;\n current->to->bruhat_higher = new;\n }\n }\n\n return result;\n}\n\nvoid weyl_destroy_bruhat(doublequotient_t *dq)\n{\n free(dq->group);\n free(dq->grouplists);\n free(dq->groupletters);\n free(dq->cosets);\n free(dq->lists);\n free(dq);\n}\n"
},
{
"alpha_fraction": 0.5826396942138672,
"alphanum_fraction": 0.5933412313461304,
"avg_line_length": 24.515151977539062,
"blob_id": "a815a82ba27d2f6a8dc4d958e5d06a667a83b74d",
"content_id": "408c3ac2f92cb1bc84e85c438067eea2e4477bad",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 841,
"license_type": "no_license",
"max_line_length": 71,
"num_lines": 33,
"path": "/equiv_class/make_equivs.py",
"repo_name": "jlynnvaccaro/balanced_ideals_scripts",
"src_encoding": "UTF-8",
"text": "import equivalence_class\nimport utils\nimport time\n\n# for i in range(1,9):\n\n\nA = equivalence_class.equiv_tree(1)\nstart = time.time()\nfor i in range(1,8):\n A.up_cartan(i)\n A.make_equiv_class()\n end = time.time()\n print(\"Time diff {}: {:.4f}s\".format(i,end-start))\n start = end\n # d = {}\n # utils.add_header(d)\n # cartan = \"A\"+str(i)\n # filename = cartan + \".json\"\n # A = equivalence_class.equiv_tree(i)\n # A.make_equiv_class()\n # d[\"cartan_type\"] = cartan\n # d[\"equiv_class\"] = equivalence_class.to_str_list(A.equiv_nodes())\n # d[\"num_equiv_class\"] = len(d[\"equiv_class\"])\n # utils.save_json_data(filename,d)\n\nstart = time.time()\nfor i in range(1,8):\n A = equivalence_class.equiv_tree(i)\n A.make_equiv_class()\n end = time.time()\n print(\"{}: {:.4f}s\".format(i,end-start))\n start = end"
},
{
"alpha_fraction": 0.5551234483718872,
"alphanum_fraction": 0.5671786665916443,
"avg_line_length": 23.966018676757812,
"blob_id": "1160a90df77b5fc70ed35dbafc3d811d31b77ca9",
"content_id": "a415136b12950335841b8d37f229bc4700233ea8",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "C",
"length_bytes": 5143,
"license_type": "no_license",
"max_line_length": 105,
"num_lines": 206,
"path": "/enumerate_rewrite/bitvec.h",
"repo_name": "jlynnvaccaro/balanced_ideals_scripts",
"src_encoding": "UTF-8",
"text": "/*********************************************************************\n * Filename: bitvec.h\n *\n * Description: Bit vectors implemented as uint64_t arrays, size\n * fixed at compile time (in weylconfig.h). Supports\n * efficient set operations: Union, difference, count.\n * Uses SSE4 64-bit popcount instruction.\n *\n * Author: David Dumas <[email protected]>\n *\n * This program is free software distributed under the MIT license.\n * See the file LICENSE for details.\n ********************************************************************/\n\n#ifndef __BITVEC_H__\n#define __BITVEC_H__ 1\n\n#include <string.h>\n\n#include <inttypes.h>\n\n#include <stdio.h>\n#include <stdlib.h>\n\n// FIRSTBITS(n) only yields useful result when 0 <= n < 64\n#define BV_RANK 10\n#define BV_BLOCKSIZE 64\n\n#define FIRSTBITS(n) (((uint64_t)1 << (n)) - 1l)\n#define BIT(n) (((uint64_t)1 << (n)))\n#define ALLBITS ((uint64_t)-1)\n#define BLOCK(n) ((n)/64)\n#define INDEX(n) ((n)%64)\n\ntypedef struct {\n uint64_t v[BV_RANK];\n} bitvec_t;\n\nstatic inline void bv_clear_bit(bitvec_t *x, int k)\n{\n x->v[BLOCK(k)] &= ~BIT(INDEX(k));\n}\n\nstatic inline void bv_set_bit(bitvec_t *x, int k)\n{\n x->v[BLOCK(k)] |= BIT(INDEX(k));\n}\n\nstatic inline int bv_get_bit(const bitvec_t *x, int k)\n{\n return (x->v[BLOCK(k)] >> INDEX(k)) & 0x1;\n}\n\nstatic inline void bv_clear(bitvec_t *x)\n{\n int i;\n for (i=0;i<BV_RANK;i++)\n x->v[i] = 0;\n}\n\nstatic inline int bv_is_zero(const bitvec_t *x)\n{\n int i;\n for (i=0;i<BV_RANK;i++)\n if (x->v[i])\n return 0;\n\n return 1;\n}\n\nstatic inline void bv_print(FILE *f, const bitvec_t *x, int len)\n{\n for(int i = 0; i < len; i++) {\n fputc(bv_get_bit(x, i) ? '1' : '0', f);\n // if(i % BLOCKSIZE == BLOCKSIZE - 1)\n // fputc('-',f);\n }\n}\n\nstatic inline void bv_print_nice(FILE *f, const bitvec_t *pos, const bitvec_t *neg, int special, int len)\n{\n for(int i = 0; i < len; i++) {\n if(i == special)\n fputc('X', f);\n else if(bv_get_bit(pos, i) && !bv_get_bit(neg, i))\n fputc('1', f);\n else if(!bv_get_bit(pos, i) && bv_get_bit(neg, i))\n fputc('0', f);\n else if(!bv_get_bit(pos, i) && !bv_get_bit(neg, i))\n fputc(' ', f);\n else\n fputc('-', f);\n }\n}\n\nstatic inline void bv_union(const bitvec_t *x, const bitvec_t *y, bitvec_t *result)\n{\n int i;\n for (i=0; i < BV_RANK; i++) {\n result->v[i] = x->v[i] | y->v[i];\n }\n}\n\nstatic inline void bv_intersection(const bitvec_t *x, const bitvec_t *y, bitvec_t *result)\n{\n int i;\n for (i=0; i < BV_RANK; i++) {\n result->v[i] = x->v[i] & y->v[i];\n }\n}\n\nstatic inline void bv_difference(const bitvec_t *x, const bitvec_t *y, bitvec_t *result)\n{\n int i;\n for (i=0; i < BV_RANK; i++) {\n result->v[i] = x->v[i] & ~y->v[i];\n }\n}\n\nstatic inline int bv_disjoint(const bitvec_t *x, const bitvec_t *y)\n{\n for(int i = 0; i < BV_RANK; i++)\n if(x->v[i] & y->v[i])\n return 0;\n\n return 1;\n}\n\nstatic inline int bv_full(const bitvec_t *x, int len)\n{\n int i;\n for(i = 0; i < BLOCK(len); i++)\n if(x->v[i] != ALLBITS)\n return 0;\n\n return (x->v[i] & FIRSTBITS(INDEX(len))) == FIRSTBITS(INDEX(len));\n}\n\n// set bits in range start...end (including start and excluding end)\nstatic inline void bv_set_range(bitvec_t *x, int start, int end)\n{\n if(BLOCK(start) == BLOCK(end))\n x->v[BLOCK(start)] |= ~FIRSTBITS(INDEX(start)) & FIRSTBITS(INDEX(end));\n else {\n x->v[BLOCK(start)] |= ~FIRSTBITS(INDEX(start));\n for(int i = BLOCK(start) + 1; i < BLOCK(end); i++)\n x->v[i] = ALLBITS;\n x->v[BLOCK(end)] |= FIRSTBITS(INDEX(end));\n }\n}\n\n// set bits in range start...end (including start and excluding end), except if they are set in mask\nstatic inline void bv_set_range_except(bitvec_t *x, const bitvec_t *mask, int start, int end)\n{\n if(BLOCK(start) == BLOCK(end))\n x->v[BLOCK(start)] |= ~FIRSTBITS(INDEX(start)) & FIRSTBITS(INDEX(end)) & ~mask->v[BLOCK(start)];\n else {\n x->v[BLOCK(start)] |= ~FIRSTBITS(INDEX(start)) & ~mask->v[BLOCK(start)];\n for(int i = BLOCK(start) + 1; i < BLOCK(end); i++)\n x->v[i] |= ~mask->v[i];\n x->v[BLOCK(end)] |= FIRSTBITS(INDEX(end)) & ~mask->v[BLOCK(end)];\n }\n}\n\n// find least significant 0 bit starting from position start (included)\nstatic inline int bv_next_zero(const bitvec_t *x, int start)\n{\n int position;\n\n position = ffsll(~(x->v[BLOCK(start)] | FIRSTBITS(INDEX(start))));\n\n if(position)\n return BLOCK(start)*BV_BLOCKSIZE + position - 1; // found zero in same chunk\n\n for(int i = BLOCK(start) + 1; i < BV_RANK; i++) {\n position = ffsll(~x->v[i]);\n if(position) // found a 0\n return i*BV_BLOCKSIZE + position - 1;\n }\n\n return BV_RANK*BV_BLOCKSIZE; // found nothing\n}\n\nstatic inline void bv_copy(const bitvec_t *from, bitvec_t *to)\n{\n for(int i = 0; i < BV_RANK; i++)\n to->v[i] = from->v[i];\n}\n\nstatic inline void bv_negate(const bitvec_t *from, bitvec_t *to)\n{\n for(int i = 0; i < BV_RANK; i++)\n to->v[i] = ~from->v[i];\n}\n\nstatic inline int bv_count_bits(const bitvec_t *vec,int len)\n{\n int count = 0;\n for(int i=0; i<len;i++)\n {\n count += bv_get_bit((vec), i);\n }\n return count;\n}\n\n#endif /* __BITVEC_H__ */\n"
},
{
"alpha_fraction": 0.7472745180130005,
"alphanum_fraction": 0.7492566704750061,
"avg_line_length": 36.37036895751953,
"blob_id": "adc7b659856f6d03e11df2ce7f9c0781da802474",
"content_id": "8e16dc27a2b2b7446a118f89e955113ac6af3401",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "C",
"length_bytes": 1009,
"license_type": "no_license",
"max_line_length": 120,
"num_lines": 27,
"path": "/enumerate_rewrite/thickenings.h",
"repo_name": "jlynnvaccaro/balanced_ideals_scripts",
"src_encoding": "UTF-8",
"text": "#ifndef THICKENINGS_H\n#define THICKENINGS_H\n\n#include \"bitvec.h\"\n#include \"weyl.h\"\n\n#define DEBUG(msg, ...) do{fprintf(stderr, msg, ##__VA_ARGS__); }while(0)\n\nstruct enumeration_info {\n int size; // the size of the weyl group. We store however only the first size/2 elements\n bitvec_t *principal_pos;\n bitvec_t *principal_neg;\n int *principal_is_slim;\n int *principal_is_fat;\n void (*callback)(const bitvec_t *, int, const struct enumeration_info *);\n void *callback_data;\n};\n\ntypedef void (*enumeration_callback)(const bitvec_t *, int, const struct enumeration_info *);\ntypedef struct enumeration_info enumeration_info_t;\n\n// enumerating balanced thickenings\nlong enumerate_balanced_thickenings(doublequotient_t *dq, enumeration_callback callback, void *callback_data);\nlong enumerate_principal_balanced_thickenings(doublequotient_t *dq, enumeration_callback callback, void *callback_data);\nlong enumerate_core(doublequotient_t *dq, enumeration_callback callback, void *callback_data);\n\n#endif\n"
},
{
"alpha_fraction": 0.5650272965431213,
"alphanum_fraction": 0.5715847015380859,
"avg_line_length": 25.14285659790039,
"blob_id": "05a7be00e4ec9694cc150002b548bb97d49e1ca3",
"content_id": "c8c1aeeff592d6ed7fce4ccf1183b0f01e9386ea",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 915,
"license_type": "no_license",
"max_line_length": 57,
"num_lines": 35,
"path": "/sage/core_facts.py",
"repo_name": "jlynnvaccaro/balanced_ideals_scripts",
"src_encoding": "UTF-8",
"text": "import utils\nimport sys\nfrom collections import defaultdict\n\ndef bruhat_len(s):\n \"\"\"Gives the length of the word\"\"\"\n if s==\"1\":\n return 0\n return len(s)\n\ndef print_hist(max_len, d):\n \"\"\"Given a defaultdict, prints it in a nice order.\"\"\"\n for i in range(max_len+1):\n print(\"{}: {}\".format(i,d[i]))\n\ndef make_len_hist(L):\n \"\"\"Make a histogram of lengths using defaultdict\"\"\"\n d = defaultdict(int)\n max_len = 0\n for x in L:\n x_len = bruhat_len(x)\n d[x_len] += 1\n if x_len > max_len:\n max_len = x_len\n return max_len, d\n\nif __name__==\"__main__\":\n d = utils.load_json_data(sys.argv[1])\n max_len, hist = make_len_hist(d[\"core\"])\n print(\"Type:\",d[\"cartan_type\"])\n print(\"Total size:\",d[\"weyl_order\"])\n print(\"Core size:\",d[\"num_core\"])\n print(\"Core length:\",max_len)\n print(\"Core distribution:\")\n print_hist(max_len,hist)\n"
},
{
"alpha_fraction": 0.5043758749961853,
"alphanum_fraction": 0.5228005647659302,
"avg_line_length": 30.02857208251953,
"blob_id": "181ed36afce66f1d29c0a86071816fc6dbd4d25c",
"content_id": "7c7063cd4303c531e29ede16ef10285c90f2c579",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 2171,
"license_type": "no_license",
"max_line_length": 116,
"num_lines": 70,
"path": "/python/length_spread/gen_set.py",
"repo_name": "jlynnvaccaro/balanced_ideals_scripts",
"src_encoding": "UTF-8",
"text": "# Is the number of generators of a lower-half balanced ideal the maximum # of generators among all balanced ideals?\nimport utils\nfrom collections import defaultdict\nimport gzip\nimport os\n# datafile = \"/nas/share/ideals2021/data/A1A1A1A1A1.txt\"\n\nbig_list = []\n\n# DATAPATH = \"/home/jvacca4/data/\"\nDATAPATH = \"/nas/share/ideals2021/data/\"\nos.chdir(DATAPATH)\nfor datafile in os.listdir(\".\"):\n if \"summary\" in datafile or \"tail\" in datafile:\n print(\"Skipping\",datafile)\n continue\n # if datafile != \"A1A2.txt\":\n # continue\n print(datafile)\n d = {\"name\":datafile,\"summand\":1,\"order\":0,\"l(w0)\":0,\"num_gens\":defaultdict(int),\"Rh num_gens\":defaultdict(int)}\n gen_set = set()\n\n\n if datafile[-4:] ==\".txt\":\n f = open(datafile, \"rt\")\n else:\n f = gzip.open(datafile, 'rt')\n\n for line in f:\n line=line.strip()\n if \"x\" in line:\n d[\"summand\"] += line.count(\"x\")\n if \"Order:\" in line:\n words = line.split()\n maybe_order = int(words[3])\n if maybe_order>d[\"order\"]:\n d[\"order\"] = maybe_order\n d[\"l(w0)\"] = int(words[6])\n # elif \"Found\" in line:\n # words = line.split()\n # maybe_total = int(words[1])\n # if maybe_total>d[\"total\"]:\n # d[\"total\"] = maybe_total\n elif \"gen:\" in line:\n # for i in utils.get_gen_set(line):\n # gen_set.add(i)\n m,n = utils.get_ideal_stats(line)\n d[\"num_gens\"][n] += 1\n if m<=d[\"l(w0)\"]/2:\n d[\"Rh num_gens\"][n] += 1\n L = list(gen_set)\n d[\"gen_set\"] = L\n big_list.append(d)\n\n\nfor grp in big_list:\n print(grp[\"name\"])\n # Max # of generators in an ideal, and how many\n k1 = max(grp[\"num_gens\"].keys())\n # print(k1, grp[\"num_gens\"][k1])\n # # of \n if len(grp[\"Rh num_gens\"].keys())>0:\n k2 = max(grp[\"Rh num_gens\"].keys())\n # print(k2, grp[\"num_gens\"][k2])\n if k1==k2 and grp[\"num_gens\"][k1] == grp[\"num_gens\"][k2]:\n print(\"TRUE!\")\n else:\n print(\"-------False.\")\n else:\n print(\"No lower half ideals\")"
},
{
"alpha_fraction": 0.5211581587791443,
"alphanum_fraction": 0.5389754772186279,
"avg_line_length": 27.09375,
"blob_id": "571803c5db5b99232f0d857e0423e27e4923ef7f",
"content_id": "d1f0f1c4f067a1458d0499ce952b8fec41f31646",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 898,
"license_type": "no_license",
"max_line_length": 114,
"num_lines": 32,
"path": "/python/order_to_ideals/order_to_ideals.py",
"repo_name": "jlynnvaccaro/balanced_ideals_scripts",
"src_encoding": "UTF-8",
"text": "## 5/25/2021 J Vaccaro\n## Script for calculating a linear regression between the order of a Weyl group and its number of balanced ideals.\n\nimport os\n\norders = []\nideals = []\nDATAPATH = \"/nas/share/ideals2021/data/\"\nos.chdir(DATAPATH)\nfor datafile in os.listdir(\".\"):\n if datafile[-4:] != \".txt\":\n continue\n print(datafile)\n order = 0\n ideal = 0\n with open(datafile, \"rt\") as f:\n for line in f:\n line=line.strip()\n if \"Order:\" in line:\n words = line.split()\n maybe_order = int(words[3])\n if maybe_order>order:\n order = maybe_order\n elif \"Found\" in line:\n words = line.split()\n maybe_ideal = int(words[1])\n if maybe_ideal>ideal:\n ideal = maybe_ideal\n orders.append(order)\n ideals.append(ideal)\n\nimport numpy"
},
{
"alpha_fraction": 0.5767852067947388,
"alphanum_fraction": 0.594430685043335,
"avg_line_length": 30.53913116455078,
"blob_id": "82d15f815a784f9a8816d7b3b2883d4c79224eeb",
"content_id": "ddfd543f52d13f893ba3bdc1f28833b48e5c7b88",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "C",
"length_bytes": 3627,
"license_type": "no_license",
"max_line_length": 172,
"num_lines": 115,
"path": "/enumerate_rewrite/weyldata.c",
"repo_name": "jlynnvaccaro/balanced_ideals_scripts",
"src_encoding": "UTF-8",
"text": "#include \"thickenings.h\"\n#include \"weyl.h\"\n#include \"queue.h\"\n\n#include <strings.h>\n#include <stdio.h>\n\nchar stringbuffer[100];\n\nstatic char* alphabetize(weylgroup_element_t *e, char *str)\n{\n if(e->wordlength == 0)\n sprintf(str, \"1\");\n else {\n for(int j = 0; j < e->wordlength; j++)\n str[j] = e->word[j] + 'a';\n str[e->wordlength] = 0;\n }\n\n return str;\n}\n\nint main(int argc, const char *argv[])\n{\n semisimple_type_t type;\n unsigned long right_invariance, left_invariance;\n int rank, order, positive;\n int fixpoints;\n\n doublequotient_t *dq;\n\n const char *alphabet = \"abcdefghijklmnopqrstuvwxyz\";\n\n // read arguments\n\n ERROR(argc < 2, \"Too few arguments!\\n\\nUsage is '%s A2A3' or '%s A2A3 -abc -abc' with\\nA2,A3 simple Weyl factors and abc,abc left/right invariance.\\n\\n\",argv[0],argv[0]);\n \n // Count the number of simple factors in the semisimple Weyl group\n type.n = strlen(argv[1])/2;\n fprintf(stdout, \"type.n=%d\\n\\n\",type.n);\n\n // Allocate memory, then read in the actual simple factors by letter/number, e.g. A5 is series 'A' and rank '5'. Series is A-G and the max rank is 9.\n type.factors = (simple_type_t*)malloc(type.n*sizeof(simple_type_t));\n for(int i = 0; i < type.n; i++) {\n type.factors[i].series = argv[1][2*i];\n type.factors[i].rank = argv[1][2*i+1] - '0';\n fprintf(stdout, \"type.factors[%d].series=%c, type.factors[%d].rank=%d\\n\\n\",i,type.factors[i].series,i,type.factors[i].rank);\n ERROR(argv[1][2*i] < 'A' || argv[1][2*i] > 'G' || argv[1][2*i+1] < '1' || argv[1][2*i+1] > '9', \"Arguments must be Xn with X out of A-G and n out of 1-9\\n\");\n }\n\n left_invariance = right_invariance = 0;\n // Additional command line arguments that were not factors are the left/right invariance.\n if(argc >= 3) {\n if(strcmp(argv[2], \"-\") != 0){\n printf(\"%s\\n\",argv[type.n+1]);\n for(int i = 0; i < strlen(argv[type.n + 1]); i++)\n\t left_invariance |= (1 << (argv[type.n + 1][i] - 'a'));\n }\n if(strcmp(argv[3], \"-\") != 0){\n for(int i = 0; i < strlen(argv[type.n + 2]); i++)\n\t right_invariance |= (1 << (argv[type.n + 2][i] - 'a'));\n }\n }\n\n // generate graph\n // dq is the Weyl graph\n\n // dq = weyl_generate_bruhat(type, left_invariance, right_invariance);\n\n // print stuff\n\n rank = weyl_rank(type); // number of simple roots\n order = weyl_order(type); // number of Weyl group elements\n positive = weyl_positive(type); // number of positive roots\n\n int *cartan_matrix;\n cartan_matrix = (int*)malloc(rank*rank*sizeof(int));\n weyl_cartan_matrix(type, cartan_matrix); //cartan matrix\n\n fprintf(stdout, \"Rank: %d\\nOrder: %d\\nPositive Roots: %d\\n\\n\", rank, order, positive);\n \n fprintf(stdout, \"Cartan matrix for %s:\\n\",argv[1]);\n for (int i=0; i<rank; i++) {\n fprintf(stdout, \"[ \");\n for (int j=0; j<rank; j++){\n // Make the spacing nice\n // TODO: Could also be rank*j+i, which will make the transpose.\n if (cartan_matrix[rank*i+j]>=0) {\n fprintf(stdout, \" \");\n }\n fprintf(stdout, \"%d \",cartan_matrix[rank*i+j]);\n }\n fprintf(stdout, \"]\\n\");\n }\n\n // print out weylgroup elements\n weylgroup_t *wgroup = weyl_generate(type);\n fprintf(stdout, \"\\nElements:\\n\");\n for (int i=0; i<order; i++){\n if (i!= 0){\n fprintf(stdout, \", \");\n }\n fprintf(stdout, \"%s\", alphabetize(&wgroup->elements[i], stringbuffer));\n // fprintf(stdout, \"%d, \", *wgroup->elements[i].word);\n }\n fprintf(stdout, \"\\n\");\n\n\n // Deconstruct the dq\n // weyl_destroy_bruhat(dq);\n weyl_destroy(wgroup);\n free(type.factors);\n\n return 0;\n}\n"
},
{
"alpha_fraction": 0.6939083337783813,
"alphanum_fraction": 0.6951146125793457,
"avg_line_length": 27.83478355407715,
"blob_id": "46f37a582b95c2548aac798b9e5f578686a0db3a",
"content_id": "cb150bcdc161d159559507e34ab3fef84f5ac164",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "C",
"length_bytes": 3316,
"license_type": "no_license",
"max_line_length": 120,
"num_lines": 115,
"path": "/enumerate_rewrite/weyl.h",
"repo_name": "jlynnvaccaro/balanced_ideals_scripts",
"src_encoding": "UTF-8",
"text": "#ifndef WEYL_H\n#define WEYL_H\n\n#include <inttypes.h>\n\nstruct _simple_type;\nstruct _semisimple_type;\nstruct _weylgroup_element;\nstruct _weylgroup;\nstruct _doublecoset;\nstruct _doublecoset_list;\nstruct _doublequotient;\n\ntypedef uint64_t weylid_t;\ntypedef struct _simple_type simple_type_t;\ntypedef struct _semisimple_type semisimple_type_t;\ntypedef struct _weylgroup_element weylgroup_element_t;\ntypedef struct _weylgroup weylgroup_t;\ntypedef struct _doublecoset doublecoset_t;\ntypedef struct _doublecoset_list doublecoset_list_t;\ntypedef struct _doublequotient doublequotient_t;\n\n/***************************** structures *******************************/\n\nstruct _simple_type {\n char series;\n int rank;\n};\n\nstruct _semisimple_type {\n int n;\n simple_type_t *factors;\n};\n\nstruct _weylgroup_element {\n int *word;\n int wordlength;\n weylgroup_element_t **left;\n weylgroup_element_t **right;\n weylgroup_element_t *opposite;\n int is_root_reflection; // boolean value\n weylid_t id; // a unique id\n int index;\n\n // only set if quotient is generated\n doublecoset_t *coset;\n};\n\nstruct _weylgroup {\n semisimple_type_t type;\n weylgroup_element_t *elements;\n weylgroup_element_t **lists;\n int *letters;\n};\n\nstruct _doublecoset {\n doublecoset_list_t *bruhat_lower;\n doublecoset_list_t *bruhat_higher;\n doublecoset_t *opposite;\n weylgroup_element_t *max;\n weylgroup_element_t *min;\n int index;\n};\n\nstruct _doublecoset_list {\n doublecoset_t *to;\n doublecoset_list_t *next;\n};\n\nstruct _doublequotient {\n semisimple_type_t type;\n int left_invariance; // bitmask with rank bits\n int right_invariance;\n int count; // number of double cosets\n doublecoset_t *cosets;\n weylgroup_element_t *group;\n doublecoset_list_t *lists; // only for memory allocation / freeing\n weylgroup_element_t **grouplists; // only for memory allocation / freeing\n int *groupletters; // only for memory allocation / freeing\n};\n\n/***************************** functions **************************************/\n\n/* query some basic information on root systems / Weyl groups */\n\n// the rank\nint weyl_rank(semisimple_type_t type);\n\n// the order of the weyl group\nint weyl_order(semisimple_type_t type);\n\n// the number of reduced positive roots\nint weyl_positive(semisimple_type_t type);\n\n// the Cartan matrix (has rank columns and rank rows)\nvoid weyl_cartan_matrix(semisimple_type_t type, int *m);\n\n// the opposition involution as a map from simple roots to simple roots (indexed from 0 to rank-1)\nint weyl_opposition(semisimple_type_t type, int simple_root);\n\n/* generate the Weyl group:\n weyl_destroy() has to be used to free memory\n */\nweylgroup_t *weyl_generate(semisimple_type_t type);\nvoid weyl_destroy(weylgroup_t *group);\n\n/* generate a double quotient of the Weyl group and its Bruhat order:\n left_invariance and right_invariance are bitmaps specifying a subset of the simple roots\n The Weyl group will be quotiented from the left and right by the subgroups generated by these simple root reflections\n weyl_destroy_bruhat() has to be used to free memory\n */\ndoublequotient_t *weyl_generate_bruhat(semisimple_type_t type, int left_invariance, int right_invariance);\nvoid weyl_destroy_bruhat(doublequotient_t *dq);\n\n#endif\n"
},
{
"alpha_fraction": 0.7795275449752808,
"alphanum_fraction": 0.7795275449752808,
"avg_line_length": 30.75,
"blob_id": "8db25a3c601cd14db4856702a03b67caabe1b165",
"content_id": "f47d6f7732be0de488ba72f0f0a856c2db92caeb",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Markdown",
"length_bytes": 127,
"license_type": "no_license",
"max_line_length": 89,
"num_lines": 4,
"path": "/README.md",
"repo_name": "jlynnvaccaro/balanced_ideals_scripts",
"src_encoding": "UTF-8",
"text": "# Balanced ideals scripts\n## J Vaccaro\n\nStorage location for various scripts written for the blanced ideals project, as a backup.\n"
},
{
"alpha_fraction": 0.6416465044021606,
"alphanum_fraction": 0.647699773311615,
"avg_line_length": 34.956520080566406,
"blob_id": "8adfdd5c3aba87af59941d43753fdddf765b37ce",
"content_id": "0b0e37e2df4cbdf5bec8ee7373fbb17dd768b3d8",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 826,
"license_type": "no_license",
"max_line_length": 80,
"num_lines": 23,
"path": "/python/length_spread/zopen.py",
"repo_name": "jlynnvaccaro/balanced_ideals_scripts",
"src_encoding": "UTF-8",
"text": "\"\"\"Open file with automatic (de)compression based on filename\"\"\"\n# David Dumas <[email protected]>\n# This work is placed in the public domain.\nimport os\n\n\ndef zopen(fn, *args, **kwargs):\n \"\"\"Open a file, transparently decompressing on read or compressing on write\n if the file extension indicates bzip2 compression (.bz2) or gzip compression\n (.gz).\"\"\"\n # TODO: Handle (kw)args only supported by one of the backends gracefully\n # TODO: gzip, bzip use binary mode by default, open() uses text. Should\n # something be done to enforce a consistent default?\n _, ext = os.path.splitext(fn)\n ext = ext.lower()\n if ext == \".bz2\":\n import bz2\n return bz2.open(fn, *args, **kwargs)\n elif ext == \".gz\":\n import gzip\n return gzip.open(fn, *args, **kwargs)\n\n return open(fn, *args, **kwargs)"
},
{
"alpha_fraction": 0.5488349199295044,
"alphanum_fraction": 0.5765988826751709,
"avg_line_length": 27.01388931274414,
"blob_id": "4fae24186f43b71642da34457a4d32c2f2d8fb5d",
"content_id": "3da69daaf6ba29b25f6866b08f284ac889f285d8",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "C",
"length_bytes": 2017,
"license_type": "no_license",
"max_line_length": 155,
"num_lines": 72,
"path": "/enumerate_rewrite/graph.c",
"repo_name": "jlynnvaccaro/balanced_ideals_scripts",
"src_encoding": "UTF-8",
"text": "#include \"weyl.h\"\n#include \"queue.h\"\n\n#include <strings.h>\n#include <stdio.h>\n#include <memory.h>\n\nstatic char* alphabetize(weylgroup_element_t *e, char *str)\n{\n if(e->wordlength == 0)\n sprintf(str, \"1\");\n else {\n for(int j = 0; j < e->wordlength; j++)\n str[j] = e->word[j] + 'a';\n str[e->wordlength] = 0;\n }\n\n return str;\n}\n\nint main(int argc, const char *argv[])\n{\n\tsemisimple_type_t type;\n\tunsigned long right_invariance, left_invariance;\n\tdoublequotient_t *dq;\n\tconst char *alphabet = \"abcdefghijklmnopqrstuvwxyz\";\n\tchar stringbuffer[100];\n\tchar stringbuffer2[100];\n\n\tERROR(argc < 2, \"Too few arguments!\\n\");\n\n\ttype.n = 0;\n\tfor(int i = 0; i < argc - 1; i++) {\n\t\tif(argv[i+1][0] < 'A' || argv[i+1][0] > 'G')\n\t\t\tbreak;\n\t\ttype.n++;\n\t}\n\n\ttype.factors = (simple_type_t*)malloc(type.n*sizeof(simple_type_t));\n\tfor(int i = 0; i < type.n; i++) {\n\t\ttype.factors[i].series = argv[i+1][0];\n\t\ttype.factors[i].rank = argv[i+1][1] - '0';\n\t\tERROR(argv[i+1][0] < 'A' || argv[i+1][0] > 'G' || argv[i+1][1] < '1' || argv[i+1][1] > '9', \"Arguments must be Xn with X out of A-G and n out of 1-9\\n\");\n\t}\n\n\tleft_invariance = right_invariance = 0;\n\n\tif(argc - type.n >= 3) {\n\t\tif(strcmp(argv[type.n + 1], \"-\") != 0)\n\t\t\tfor(int i = 0; i < strlen(argv[type.n + 1]); i++)\n\t\t\t\tleft_invariance |= (1 << (argv[type.n + 1][i] - 'a'));\n\t\tif(strcmp(argv[type.n + 2], \"-\") != 0)\n\t\t\tfor(int i = 0; i < strlen(argv[type.n + 2]); i++)\n\t\t\t\tright_invariance |= (1 << (argv[type.n + 2][i] - 'a'));\n\t}\n\n\t// generate graph\n\n\tdq = weyl_generate_bruhat(type, left_invariance, right_invariance);\n\n fprintf(stdout, \"digraph test123 {\\n\");\n for(int i = 0; i < dq->count; i++)\n for(doublecoset_list_t *current = dq->cosets[i].bruhat_lower; current; current = current->next)\n fprintf(stdout, \"%s -> %s;\\n\",\n alphabetize(dq->cosets[i].min, stringbuffer),\n alphabetize(current->to->min, stringbuffer2));\n fprintf(stdout, \"}\\n\\n\");\n\n\t// clean up\n\tweyl_destroy_bruhat(dq);\n\tfree(type.factors);\n}\n"
},
{
"alpha_fraction": 0.46191513538360596,
"alphanum_fraction": 0.5386289358139038,
"avg_line_length": 20.63529396057129,
"blob_id": "6a8e06ca556287fbf9a608f4463e5390b9220c49",
"content_id": "81312e5112b07b7d44538a5083bbc1a035fda2e4",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 1838,
"license_type": "no_license",
"max_line_length": 110,
"num_lines": 85,
"path": "/equiv_class/utils.py",
"repo_name": "jlynnvaccaro/balanced_ideals_scripts",
"src_encoding": "UTF-8",
"text": "import json\nimport datetime\n\ndef add_header(d,version=\"0.0.1\"):\n \"\"\"Adds the json header to files\"\"\"\n now = datetime.datetime.now()\n json_dt = now.isoformat()\n d[\"timestamp\"] = json_dt\n d[\"creator\"] = __file__\n d[\"version\"] = version\n\ndef get_ideal_stats(line):\n \"\"\"Gets the max_length of the ideal and the number of generators\"\"\"\n L = line.strip().split(\":\")\n L = L[-1].strip().split()\n m = 0\n for gen in L:\n gen_l = len(gen)\n if gen_l>m:\n m = gen_l\n return m, len(L)\n\ndef get_gen_set(line):\n L = line.strip().split(\":\")\n L = L[-1].strip().split()\n return L\n\ndef load_json_data(filename):\n with open(filename, \"rt\") as f:\n return json.load(f)\n\ndef save_json_data(filename, data):\n with open(filename, \"wt\") as f:\n json.dump(data, f)\n\ndef simple_list():\n return [\"A1.txt\",\"A2.txt\",\"A3.txt\",\"A4.txt\",\n \"B2.txt\",\"B3.txt\",\"D4.txt\",\"G2.txt\"]\n\ndef fast_data_list():\n return [\"A1A1A1A1A1A1A1.txt\",\n \"A1A1A2.txt\",\n \"A2B2.txt\",\n \"A1A1A1A1A1A1.txt\",\n \"A1A1A3.txt\",\n \"A1B2B2.txt\",\n \"B2B2.txt\",\n \"A1A1A1A1A1.txt\",\n \"A1A1B2.txt\",\n \"A1B2G2.txt\",\n \"A2G2.txt\",\n \"A1A1A1A1A2.txt\",\n \"A1A1G2.txt\",\n \"A1B2.txt\",\n \"B2G2.txt\",\n \"D4.txt\",\n \"A1A1A1A1B2.txt\",\n \"A1A1.txt\",\n \"A1B3.txt\",\n \"A2.txt\",\n \"G2G2.txt\",\n \"A1A1A1A1.txt\",\n \"A1A2A2.txt\",\n \"A3B2.txt\",\n \"B2.txt\",\n \"A1A1A1A2.txt\",\n \"G2.txt\",\n \"A1A1A1B2.txt\",\n \"A1A2B2.txt\",\n \"A1G2.txt\",\n \"A1A1A1G2.txt\",\n \"A1A2G2.txt\",\n \"A1.txt\",\n \"B3.txt\",\n \"A1A1A1.txt\",\n \"A1A2.txt\",\n \"A2A2.txt\",\n \"A3.txt\",\n \"A1A1A2A2.txt\",\n \"A1A3.txt\",\n \"A2A3.txt\",\n \"A4.txt\"]\n\ndef slow_data_list():\n return [\"A1A2A3.txt.gz\",\"A1A4.txt.gz\",\"A1D4ideals.txt.gz\",\"A2B3.txt.gz\",\"A3G2.txt.gz\",\"B2B3ideals.txt.gz\"]"
},
{
"alpha_fraction": 0.5824039578437805,
"alphanum_fraction": 0.5873606204986572,
"avg_line_length": 32.45833206176758,
"blob_id": "7be791278a64f977317f5880ff26fdca72637337",
"content_id": "1302fe5c4d1a2b6451e20f854d37b1042757c65f",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 807,
"license_type": "no_license",
"max_line_length": 72,
"num_lines": 24,
"path": "/python/length_spread/txt_to_json.py",
"repo_name": "jlynnvaccaro/balanced_ideals_scripts",
"src_encoding": "UTF-8",
"text": "import utils\nimport sys\nimport os\n\ndef write_ideals_to_json(infile, outfile):\n basename = os.path.basename(infile)\n name = os.path.splitext(basename)\n d = {\"name\":name,\"summands\":[],\"ideals\":[]}\n with open(infile, \"rt\") as f:\n summands = f.readline().strip().split(\"x\")\n d[\"summands\"] = [s.strip() for s in summands]\n for line in f:\n line=line.strip()\n if \"gen:\" in line:\n d[\"ideals\"].append([g for g in utils.get_gen_set(line)])\n utils.save_json_data(outfile, d)\n print(\"Success! Wrote {} ideals to {}\".format(infile,outfile))\n\nif __name__==\"__main__\":\n if len(sys.argv) < 3:\n raise TypeError(\"Requires 3 input arguments\")\n infile = sys.argv[1]\n outfile = sys.argv[2]\n write_ideals_to_json(infile, outfile)\n\n\n\n\n"
},
{
"alpha_fraction": 0.5424675941467285,
"alphanum_fraction": 0.5525256991386414,
"avg_line_length": 28.635761260986328,
"blob_id": "7ddb560b90ea010f1a3ac7838fb6416e325eaf2d",
"content_id": "b0450fcca64b217cdfa7351a024dc1bfa04c04e8",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 4474,
"license_type": "no_license",
"max_line_length": 100,
"num_lines": 151,
"path": "/equiv_class/transpositions.py",
"repo_name": "jlynnvaccaro/balanced_ideals_scripts",
"src_encoding": "UTF-8",
"text": "from collections import defaultdict\nimport equivalence_class\n# from collections import defaultdict\nclass weyl_elt:\n def __init__(self,oneline):\n self.update_oneline(oneline)\n def update_oneline(self,oneline):\n self.oneline = oneline\n self.oneline_len = len(oneline)\n self.word = equivalence_class.to_word(oneline)\n if self.word==\"1\":\n self.word_len = 0\n else:\n self.word_len = len(self.word)\n self.find_triples()\n def __eq__(self,other):\n return self.word==other.word\n def __repr__(self):\n return \"{}:{}\".format(self.word,equivalence_class.matrix(self.oneline).sum().total_sum())\n def permute(self, other):\n new_oneline = [other.oneline[i] for i in self.oneline]\n return weyl_elt(new_oneline)\n\n\n def find_triples(self):\n num_triples = 0\n for i in range(self.oneline_len-2):\n if self.oneline[i]<self.oneline[i+1]<self.oneline[i+2]:\n num_triples+=1\n self.num_triples = num_triples\n\n def make_elts(self, d):\n for i in range(self.oneline_len-1):\n new_oneline = [x for x in self.oneline]\n new_oneline[i],new_oneline[i+1] = new_oneline[i+1],new_oneline[i]\n new_elt = weyl_elt(new_oneline)\n if new_elt.word_len < self.word_len:\n continue\n else:\n if new_elt not in d[new_elt.word_len]:\n d[new_elt.word_len].append(new_elt)\n\n\ndef gen_all(n):\n D = {}\n root = weyl_elt(equivalence_class.to_oneline(\"1\",n))\n D[0] = [root]\n i = 0\n while D[i]:\n D[i+1] = []\n print(\"----------------- start row\",i)\n for elt in D[i]:\n print(elt)\n print(equivalence_class.matrix(elt.oneline).sum())\n elt.make_elts(D)\n print(\"----------------- end row\",i)\n i += 1\n\nfrom termcolor import colored\n# import matplotlib.pyplot as plt\n# import numpy as np\ndef print_matrices(elt, color=\"white\"):\n \"\"\"Prints out an element and some matrices\"\"\"\n w0_elt = elt.w0()\n s = \"{} {}:{} {}\".format(elt.word,elt.oneline,w0_elt.word,w0_elt.oneline)\n print(colored(s,color))\n print(colored(\"------\",color))\n elt_sigma = elt.matrix_sigma()\n print(elt_sigma)\n w0_sigma = w0_elt.matrix_sigma()\n print(w0_sigma)\n print(colored(\"------\",color))\n elt_delta = elt_sigma.dot()\n print(elt_delta)\n w0_delta = w0_sigma.dot()\n print(w0_delta)\n # plt.figure()\n # plt.imshow(np.array(elt_delta.data))\n # plt.imshow(np.array(w0_delta.data))\n # plt.show()\n # print(colored(\"-------------------------\",color))\n # exit(0)\n\n\nA = equivalence_class.equiv_tree(3)\nA.make_equiv_class()\nequiv = A.nodes(\"equiv\")\nprint(\"\\nEquivalence class\")\nprint(\"Size:\",len(equiv))\n# for elt in equiv:\n# print_matrices(elt,\"white\")\n# exit(0)\n\nfor elt in equiv:\n print(elt.word,end=\", \")\n\nA.make_ascending_equiv_class()\nascend = A.nodes(\"ascend\")\nprint(\"\\n\\nEquivalence class (ascending only)\")\nprint(\"Size:\",len(ascend))\nfor elt in ascend:\n print(elt.word,end=\", \")\n\nA.make_symmetric_group()\nsym = A.nodes(\"sym\")\nprint(\"\\n\\nSymmetric group\")\nprint(\"Size:\",len(sym))\nD = defaultdict(list)\nfor elt in sym:\n # print(elt.word,end=\", \")\n D[elt.word_len].append(elt)\n# print(\"\")\n\n\n\nfor i in range(max(D.keys())//2+1):\n print(\" Rank:\",i)\n print(\"--------------------------------------------\")\n for elt in D[i]:\n if elt in ascend:\n print_matrices(elt,\"green\")\n elif elt in equiv:\n print_matrices(elt,\"blue\")\n else:\n print_matrices(elt,\"white\")\n print(\"\\n===========================================\")\n\n# hull_c = []\n# # D = defaultdict(list)\n# for elt in core:\n# # D[elt.word_len].append(elt)\n# w0_elt = elt.w0()\n# print(elt.word, elt.oneline, \":\", w0_elt.word, w0_elt.oneline)\n# # print(elt.matrix_permutation())\n# elt_sigma = elt.matrix_sigma()\n# w0_sigma = w0_elt.matrix_sigma()\n# print(elt_sigma > w0_sigma)\n# print(elt_sigma < w0_sigma)\n# print(elt_sigma.total_sum(), w0_sigma.total_sum(), elt_sigma.total_sum()-w0_sigma.total_sum())\n# print(elt_sigma-w0_sigma)\n# print(\"---\")\n# print(elt.matrix_sigma())\n# print(\"---\")\n# print(w0_elt.matrix_sigma())\n# # for i in range(max(D.keys())+1):\n# # for elt in D[i]:\n# # print(str(elt),end=\" \")\n# # print(\"\")\n\n# # print(\"----------------\")\n# # gen_all(4)"
},
{
"alpha_fraction": 0.6075000166893005,
"alphanum_fraction": 0.6287500262260437,
"avg_line_length": 26.55172348022461,
"blob_id": "6a29a710f4a100b6716048096b978a38b74d92c4",
"content_id": "8da1f0f5852001964080d18410e1a71225571e6b",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 800,
"license_type": "no_license",
"max_line_length": 115,
"num_lines": 29,
"path": "/python/length_spread/read_all.py",
"repo_name": "jlynnvaccaro/balanced_ideals_scripts",
"src_encoding": "UTF-8",
"text": "# Is the number of generators of a lower-half balanced ideal the maximum # of generators among all balanced ideals?\nimport utils\nfrom collections import defaultdict\nimport gzip\nimport os\n# datafile = \"/nas/share/ideals2021/data/A1A1A1A1A1.txt\"\n\nbig_list = []\n\n# DATAPATH = \"/home/jvacca4/data/\"\nDATAPATH = \"/nas/share/ideals2021/data/\"\nos.chdir(DATAPATH)\nfor datafile in os.listdir(\".\"):\n if \"summary\" in datafile or \"tail\" in datafile or \"png\" in datafile:\n print(\"Skipping\",datafile)\n continue\n # if datafile != \"A1A2.txt\":\n # continue\n print(datafile)\n\n if datafile[-4:] ==\".txt\":\n f = open(datafile, \"rt\")\n else:\n f = gzip.open(datafile, 'rt')\n\n for line in f:\n line=line.strip()\n if \"x\" in line:\n print(\"----\",line)\n\n"
}
] | 24 |
roshanpokhrel/webscrape
|
https://github.com/roshanpokhrel/webscrape
|
cf39c6826913c2e01490797e3a48943574eddcaf
|
23f3e07b14bbd6ea5ee1b8d5f21e4a7b7b7b1657
|
f73241e87cef9250d8e6a3648b23920dddccb19f
|
refs/heads/master
| 2020-03-25T17:17:05.148421 | 2018-08-08T06:36:22 | 2018-08-08T06:36:22 | 143,971,587 | 0 | 0 | null | null | null | null | null |
[
{
"alpha_fraction": 0.7051094770431519,
"alphanum_fraction": 0.7116788029670715,
"avg_line_length": 30.159090042114258,
"blob_id": "7cfa8bc5f3f464ee75f8482cc31e6b0550228649",
"content_id": "85c7f9f0bceec54ec321a0cabcdd18b23c736d6e",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 1370,
"license_type": "no_license",
"max_line_length": 98,
"num_lines": 44,
"path": "/Web Scrape.py",
"repo_name": "roshanpokhrel/webscrape",
"src_encoding": "UTF-8",
"text": "from urllib.request import urlopen as uReq\nfrom bs4 import BeautifulSoup as soup\n\nmy_url = 'https://www.newegg.com/Video-Cards-Video-Devices/Category/ID-38?Tpk=graphics%20card'\n\n# opening up a connection, grabbing the page\nuClient = uReq(my_url)\n# offloads contents into variable\npage_html = uClient.read()\n# close the connection\nuClient.close()\n\n# html parsing\npage_soup = soup(page_html, \"html.parser\")\n# print(page_soup.h1)\n# print(page_soup.p)\n# print(page_soup.body)\n\n# Find all div that have the class item-container\ncontainers = page_soup.findAll(\"div\", {\"class\":\"item-container\"})\n#print(len(container))\n#print(container[0])\n\n# Open a csv file to write to\nfilename = \"products_name.csv\"\nf = open(filename, \"w\")\n\nheaders = \"brand, product_name, shipping\\n\"\nf.write(headers)\nfor container in containers:\n\tbrand = container.div.div.a.img[\"title\"]\t\t#Who makes this graphics card\n\t\n\ttitle_container = container.findAll(\"a\", {\"class\":\"item-title\"})\n\tproduct_name = title_container[0].text\n\n\tshipping_container = container.findAll(\"li\", {\"class\": \"price-ship\"})\n\tshipping = shipping_container[0].text.strip()\t\t#strip to cut all whitespaces and other characters\n\n\tf.write(brand + ',' + product_name.replace(\",\", \"|\") + ',' + shipping + '\\n')\n#\tprint(\"My brand is:\" + brand)\n#\tprint(\"My Product is:\" + product_name)\n#\tprint(\"My shipping detail is:\" + shipping)\n\nf.close()"
}
] | 1 |
SathwikTejaswi/Neural-Networks-From-Scratch
|
https://github.com/SathwikTejaswi/Neural-Networks-From-Scratch
|
b3720b55920f81a9628ce821f93c20a4cf420773
|
0b0a0b37dbdbba1f7de4e3e532c56c8afdd6fc49
|
d899748c118212d71f657ce6e19ffbecd7874ded
|
refs/heads/master
| 2020-03-28T07:24:30.689836 | 2018-09-09T20:11:32 | 2018-09-09T20:11:32 | 147,899,072 | 1 | 0 | null | null | null | null | null |
[
{
"alpha_fraction": 0.5446808338165283,
"alphanum_fraction": 0.5929077863693237,
"avg_line_length": 21.774192810058594,
"blob_id": "aef13210798395e22eee9fa644b9b6e79d0710a3",
"content_id": "14bbc13c70a41bb898cb8c918466e8a4f4bd7d33",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 705,
"license_type": "no_license",
"max_line_length": 81,
"num_lines": 31,
"path": "/utils.py",
"repo_name": "SathwikTejaswi/Neural-Networks-From-Scratch",
"src_encoding": "UTF-8",
"text": "import numpy as np\nimport pandas as pd\ndef rectified_linear_unit(x):\n if x<0:\n return(0)\n else :\n return(x)\n\ndef grad_rectified_linear_unit(x):\n x[x>0] = 1\n x[x<=0] = 0\n return x\n \ndef one_hot(label):\n arr = np.zeros(shape = (10,1))\n arr[label] = 1\n return arr\n\ndef categorical_cross_entropy(y_hat, label):\n return -sum(np.multiply(np.log(y_hat), one_hot(label)))\n\ndef stable_softmax(X):\n temp2 = np.exp(X - np.max(X))\n return temp2 / np.sum(temp2)\n\ndef decay_alpha(i):\n \n return( 0.01 if i <=12 else (0.001 if i <= 24 else (0.0001)) )\n \ndef accuracy(y_hat, true):\n return 100*sum(np.where(y_hat == np.reshape(true,len(true)), 1,0))/len(y_hat)"
},
{
"alpha_fraction": 0.7476099133491516,
"alphanum_fraction": 0.7629063129425049,
"avg_line_length": 86.16666412353516,
"blob_id": "64129533078032e5e95357c00b01b866c56d6efe",
"content_id": "786668f1be77c8e828117ebbe7b8d34b1bd8574f",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Markdown",
"length_bytes": 523,
"license_type": "no_license",
"max_line_length": 108,
"num_lines": 6,
"path": "/README.md",
"repo_name": "SathwikTejaswi/Neural-Networks-From-Scratch",
"src_encoding": "UTF-8",
"text": "# Neural-Networks-From-Scratch\n#### Classification of a MNIST dataset using a single hidden layer neural network (python)\n#### The implementation depends on the following libraries : numpy, pandas, h5py\n#### A sample output from the model is shown below. (Note : test accuracy (97%) is displayed as 0.97)\n- \n- \n"
},
{
"alpha_fraction": 0.5225772857666016,
"alphanum_fraction": 0.546370267868042,
"avg_line_length": 30.298913955688477,
"blob_id": "33ff61b25105bc54b6dca7d0c531c08ba398215f",
"content_id": "75a1887f4225a24e7314c76385c8d2f43bb336af",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 5758,
"license_type": "no_license",
"max_line_length": 131,
"num_lines": 184,
"path": "/NN.py",
"repo_name": "SathwikTejaswi/Neural-Networks-From-Scratch",
"src_encoding": "UTF-8",
"text": "import numpy as np\nimport h5py\nimport pandas as pd\nfrom sklearn.model_selection import train_test_split\nfrom utils import rectified_linear_unit\nfrom utils import grad_rectified_linear_unit\nfrom utils import one_hot\nfrom utils import categorical_cross_entropy\nfrom utils import stable_softmax\nfrom utils import decay_alpha\nfrom utils import accuracy\n\nclass neural_network:\n \n def __init__(self,lr=0.001,loss_func='categorical_cross_entropy',MNIST=True,xtr=None,xts=None,ytr=None,yts=None):\n \n self.lr = lr\n self.loss_func = 'categorical_cross_entropy'\n \n if MNIST != True :\n assert(None not in [xtr,xts,ytr,yts])\n self.x_train = xtr\n self.x_test = xts\n self.y_train = ytr\n self.y_test = yts\n\n else :\n self.prepare_data()\n \n def prepare_data(self):\n \n self.get_data()\n self.x_train = (self.data_dict['x_train']).reshape(60000,784)\n self.x_test = (self.data_dict['x_test']).reshape(10000,784)\n self.y_train = (self.data_dict['y_train'])\n self.y_test = (self.data_dict['y_test'])\n \n def get_data(self,path='/Users/apple/Desktop/MNISTdata.hdf5'):\n \n print('')\n print('Fetching MNSIT Data')\n print('-------------------')\n data = h5py.File(path,'r')\n self.data_dict = {}\n \n for i in data:\n self.data_dict[i] = np.array(data[i])\n \n def get_data_stats(data_dict):\n \n print('The data has',list(data_dict.keys()))\n for i in data_dict:\n print(i, 'has shape :')\n print(np.array(data_dict[i]).shape)\n \n get_data_stats(self.data_dict)\n \n \n def print_model(self):\n print('')\n print('Model configurations are as follows :')\n print('-------------------------------------')\n \n print('layer 1')\n print('W1 has dim', self.W1.shape)\n print('b1 has dim', self.b1.shape)\n print('W2 has dim', self.W2.shape)\n print('b2 has dim', self.b2.shape)\n print('Input has dim', self.l1)\n print('Hidden layer has dim', self.l2)\n print('Output has dim', self.l3)\n \n def create_model(self, L2):\n \n self.l1 = 784\n self.l2 = L2\n self.l3 = 10\n\n self.W1 = np.random.randn(self.l2,self.l1)/np.sqrt(self.l1)\n self.b1 = np.zeros(shape = (self.l2,1))\n self.W2 = np.random.randn(self.l3,self.l2)/np.sqrt(self.l2)\n self.b2 = np.zeros(shape = (self.l3,1))\n \n def forward(self,x):\n \n self.z1 = np.array([self.W1.dot(x)]).transpose() + self.b1\n vec_rectified_linear_unit = np.vectorize(rectified_linear_unit)\n self.h = vec_rectified_linear_unit(self.z1)\n self.h = self.h.transpose()[0]\n self.z2 = np.array([self.W2.dot(self.h)]).transpose() + self.b2\n self.y_hat = stable_softmax(self.z2)\n \n def predict(self,x):\n y_hat_lab = np.zeros(shape = (len(x)))\n for i in range(len(x)):\n self.forward(x[i])\n y_hat_lab[i] = np.argmax(self.y_hat)\n return y_hat_lab\n \n def back_prop(self,Ytr1,Xtr1):\n \n y_hat = self.y_hat\n h = self.h\n z1 = self.z1\n \n true = one_hot(Ytr1)\n\n diff_outer = -(true - y_hat)\n\n del_b2 = diff_outer\n del_W2 = np.matmul(diff_outer,np.reshape(h,(1,self.l2)))\n DEL = self.W2.transpose().dot(diff_outer)\n\n NAB = np.multiply(DEL, grad_rectified_linear_unit(z1))\n del_b1 = NAB\n del_W1 = np.matmul(np.reshape(NAB, (self.l2,1)), np.reshape(Xtr1, (1,self.l1)))\n\n self.W2 = self.W2 - alpha * del_W2\n self.b2 = self.b2 - alpha * del_b2\n self.b1 = self.b1 - alpha * del_b1\n self.W1 = self.W1 - alpha * del_W1\n \n def save_weights(self):\n pd.DataFrame(self.W1).to_csv('W1.csv',index=False)\n pd.DataFrame(self.W2).to_csv('W2.csv',index=False)\n pd.DataFrame(self.b1).to_csv('b1.csv',index=False)\n pd.DataFrame(self.b2).to_csv('b2.csv',index=False)\n \n def load_model(self):\n self.W1 = np.array(pd.read_csv('W1.csv'))\n self.W2 = np.array(pd.read_csv('W2.csv'))\n self.b1 = np.array(pd.read_csv('b1.csv'))\n self.b2 = np.array(pd.read_csv('b2.csv'))\n \n def test_and_summarize(self):\n temp = [email protected]+self.b1.T\n temp = np.clip(temp,a_min=0,a_max=temp.max())\n temp = [email protected]+self.b2.T\n temp = np.exp(temp)\n temp2 = temp.sum(axis=1)\n temp = temp/temp2.reshape(-1,1)\n preds = np.argmax(temp,axis=1)\n print('the testing accuracy of the classifier is :',sum(preds.reshape(-1,1) == self.y_test.reshape(-1,1))/len(self.y_test))\n \n \n \nNN = neural_network()\nNN.create_model(100)\nNN.print_model()\n\n\n\nx_learn, x_val, y_learning, y_val = train_test_split(NN.x_train, NN.y_train)\nL = [i for i in range(0,len(x_learn))]\nepochs = 14\nprint('')\nprint('------------------------')\nprint(\"start training the net\")\n\nfor j in range(epochs):\n \n #IMPLEMETING SGD ALGORITHM\n \n alpha = decay_alpha(j)\n loss = 0\n np.random.shuffle(L)\n \n for i in L:\n NN.forward(x_learn[i])\n NN.back_prop(y_learning[i],x_learn[i])\n \n loss = loss + categorical_cross_entropy(NN.y_hat,y_learning[i])\n \n loss = loss/len(L)\n predicted_labels_validation = NN.predict(x_val)\n print('Epoch Summary for epoch:',j)\n print('Loss ->',loss[0])\n print('accuracy ->',accuracy(predicted_labels_validation,y_val))\n print('')\n \n \n\n \nNN.test_and_summarize()"
}
] | 3 |
onjoku/GitHubApi567
|
https://github.com/onjoku/GitHubApi567
|
07e081158f46159d46e4415d57549a8693642ace
|
ba9a44abbaabbe73c96e66f82c2a989cb5cb8de3
|
c10ab091133e73f0041a96b09642cd448c4125aa
|
refs/heads/master
| 2020-07-31T09:38:10.311383 | 2019-10-08T09:04:18 | 2019-10-08T09:04:18 | 210,562,888 | 0 | 0 | null | null | null | null | null |
[
{
"alpha_fraction": 0.5983145833015442,
"alphanum_fraction": 0.6020599007606506,
"avg_line_length": 26.105262756347656,
"blob_id": "a409da184bfe5d27b97c6494901de1ea41bc3e49",
"content_id": "7c2101c38893ff7f86fb4bf482a2e6b4916c303f",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 2136,
"license_type": "no_license",
"max_line_length": 129,
"num_lines": 76,
"path": "/GitHubApi567.py",
"repo_name": "onjoku/GitHubApi567",
"src_encoding": "UTF-8",
"text": "language: python\r\npython:\r\n - \"3.5\"\r\ninstall:\r\n - pip install requests\r\n\r\nimport json\r\nimport requests\r\n\"\"\"\r\nProgram: The program is a function that will communicate using the RESTful services APIs provided by GitHub. The GitHub APIs will\r\n allow you to query for information about users and their repositories,\r\n\r\nAUTHOR: Ogadinma Njoku\r\n\r\nPURPOSE: A graduate course assignment for Software Testing\r\n\"\"\"\r\n\r\n\r\ndef user_api_commits(username):\r\n try:\r\n res = requests.get('https://api.github.com/users/%s/repos' % username)\r\n except urllib.error.HTTPError as e:\r\n print('HTTPError:{}'.format(e.code))\r\n except urllib.error.URLError as e:\r\n print('URLError: {}'.format(e.reason))\r\n else:\r\n print('OK!')\r\n repos = json.loads(res.content)\r\n\r\n for repo in repos:\r\n if repo['fork'] is True: continue\r\n num = count_repo_commits(repo['url'] + '/commits')\r\n repo['num_commits'] = num\r\n yield repo\r\n\r\n\r\ndef count_repo_commits(commits_url, account=0):\r\n res = requests.get(commits_url)\r\n commits = json.loads(res.content)\r\n num = len(commits)\r\n if num == 0:\r\n return account\r\n link = res.headers.get('link')\r\n if link is None:\r\n return account + num\r\n second_url = find_second(r.headers['link'])\r\n if second_url is None:\r\n return account + num\r\n # Iteratively recurse the function result\r\n return count_repo_commits(second_url, account + num)\r\n\r\n\r\n# Find a second link for Github API \r\ndef find_second(link):\r\n for line in link.split(','):\r\n aline, bline = line.split(';')\r\n if bline.strip() == 'rel=\"next\"':\r\n return aline.strip()[1:-1]\r\n\r\n\r\nif __name__ == '__main__':\r\n\r\n try:\r\n username = input('Enter the User : ')\r\n print(\"Repositories in user's Github API Interface \")\r\n except IndexError:\r\n print ( \"Usage: %s <username>\" % username)\r\n \r\n for repo in user_api_commits(username):\r\n print (\"Repo: %(name)s Number %(num_commits)d of commits.\" % repo)\r\n \r\nlanguage: python\r\npython:\r\n - \"3.5\"\r\ninstall:\r\n - pip install requests\r\n"
}
] | 1 |
istiophorus/Codility
|
https://github.com/istiophorus/Codility
|
9cd0d7743a9a66d234bb5c8950dce4b5ff031752
|
0708777dd2174924963517bf1b1bfb2253e2591c
|
edd89cf3ce50157ce36688731b619c523031c452
|
refs/heads/master
| 2021-05-16T03:12:28.120237 | 2020-04-10T20:56:15 | 2020-04-10T20:56:15 | 29,367,999 | 0 | 0 | null | null | null | null | null |
[
{
"alpha_fraction": 0.4434947669506073,
"alphanum_fraction": 0.45394110679626465,
"avg_line_length": 26.736841201782227,
"blob_id": "299aec7e5c793de9338a125f92e4915c8aa04fc4",
"content_id": "ee9090328e1e4437165504ad92d7c70f662a4bf2",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 1053,
"license_type": "no_license",
"max_line_length": 72,
"num_lines": 38,
"path": "/Python/FishTask.py",
"repo_name": "istiophorus/Codility",
"src_encoding": "UTF-8",
"text": "# https://app.codility.com/programmers/lessons/7-stacks_and_queues/fish/\n# https://app.codility.com/demo/results/trainingTAQU2M-AME/\n\ndef solution(arrA, arrB):\n if arrA == None:\n raise ValueError()\n \n if arrB == None:\n raise ValueError() \n \n if len(arrA) != len(arrB):\n raise ValueError()\n \n ab = zip(arrA, arrB)\n \n alive = 0\n \n downstream = []\n upstream = []\n \n for currentFish,di in ab:\n if di == 0: # upstream\n if len(downstream) == 0:\n alive = alive + 1\n else:\n while len(downstream) > 0:\n last = downstream[-1]\n if last < currentFish:\n downstream.pop()\n elif last > currentFish:\n break\n \n if len(downstream) <= 0:\n alive = alive + 1\n else: # di == 1\n downstream.append(currentFish)\n \n return alive + len(downstream)"
},
{
"alpha_fraction": 0.478658527135849,
"alphanum_fraction": 0.5304877758026123,
"avg_line_length": 17.27777862548828,
"blob_id": "84df7539f1ce6c27bd06d0b702f0a8b182e6836f",
"content_id": "085feb1bf0db32ca7123a09cca26f33b9b4f4d3e",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 328,
"license_type": "no_license",
"max_line_length": 58,
"num_lines": 18,
"path": "/Python/CyclicRotation1.py",
"repo_name": "istiophorus/Codility",
"src_encoding": "UTF-8",
"text": "#https://app.codility.com/demo/results/trainingE4UNAA-Y9B/\ndef cyclic_rotation_1(arr, k):\n if k < 0 or k > 100:\n raise ValueError\n\n al = len(arr)\n\n if al == 0:\n return []\n\n k = k % al\n\n if k == 0:\n return arr\n\n return (arr * 2)[al-k:al + al-k]\n\n#print(cyclic_rotation_1([3, 8, 9, 7, 6],5))"
},
{
"alpha_fraction": 0.4348387122154236,
"alphanum_fraction": 0.4516128897666931,
"avg_line_length": 22.134328842163086,
"blob_id": "8245cd21d1c3cbff0b5d49a20b450bbc1527409e",
"content_id": "257b29c24a3b3fbf7dbd5ff483ea4db954749cc3",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 1550,
"license_type": "no_license",
"max_line_length": 81,
"num_lines": 67,
"path": "/Python/GenomicRangeQuery.py",
"repo_name": "istiophorus/Codility",
"src_encoding": "UTF-8",
"text": "\n# GenomicRangeQuery\n# https://app.codility.com/programmers/lessons/5-prefix_sums/genomic_range_query/\n# https://app.codility.com/demo/results/trainingBDRSRN-WDW/\n\ndef solution(genom, p, q):\n si = len(genom)\n arrAP = [0] * si\n arrCP = [0] * si\n arrGP = [0] * si\n arrTP = [0] * si\n\n arrAQ = [0] * si\n arrCQ = [0] * si\n arrGQ = [0] * si\n arrTQ = [0] * si\n \n queries = zip(p, q)\n \n counterA = 0\n counterC = 0\n counterG = 0\n counterT = 0\n \n for ix, ch in enumerate(genom):\n arrAP[ix] = counterA\n arrGP[ix] = counterG\n arrCP[ix] = counterC\n arrTP[ix] = counterT \n \n if ch == 'A':\n counterA += 1\n if ch == 'C':\n counterC += 1\n if ch == 'G':\n counterG += 1\n if ch == 'T':\n counterT += 1 \n \n arrAQ[ix] = counterA\n arrGQ[ix] = counterG\n arrCQ[ix] = counterC\n arrTQ[ix] = counterT \n\n result = []\n \n for ix, t in enumerate(queries):\n vp, vq = t\n \n diffA = arrAQ[vq] - arrAP[vp]\n diffG = arrGQ[vq] - arrGP[vp]\n diffC = arrCQ[vq] - arrCP[vp]\n diffT = arrTQ[vq] - arrTP[vp]\n \n minImpact = 0\n \n if diffA > 0:\n minImpact = 1\n elif diffC > 0:\n minImpact = 2\n elif diffG > 0:\n minImpact = 3\n elif diffT > 0:\n minImpact = 4\n \n result.append(minImpact)\n \n return result"
},
{
"alpha_fraction": 0.5504950284957886,
"alphanum_fraction": 0.60792076587677,
"avg_line_length": 14.78125,
"blob_id": "32ddecea15ac2ecd701c98836f325e6b6c030749",
"content_id": "8118d09bc4af6214289292867cb0ee2dcd8731bb",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Go",
"length_bytes": 505,
"license_type": "no_license",
"max_line_length": 75,
"num_lines": 32,
"path": "/Go/PassingCars.go",
"repo_name": "istiophorus/Codility",
"src_encoding": "UTF-8",
"text": "package PassingCars\n\nimport (\n\t\"fmt\"\n)\n\n// https://app.codility.com/programmers/lessons/5-prefix_sums/passing_cars/\n// https://app.codility.com/demo/results/trainingGNAZ5Z-D4M/\n\nfunc Solution(arr []int) int {\n\tzeros := 0\n\tresult := 0\n\t\n\tfor _,v := range arr {\n\t\tif v == 0 {\n\t\t\tzeros += 1\n\t\t} else {\n\t\t\tif result > 1000000000 - zeros {\n\t\t\t\treturn -1\n\t\t\t}\n\t\t\t\n\t\t\tresult += zeros\n\t\t}\n\t}\n\t\n\treturn result\n}\n\nfunc main() {\n\tfmt.Println(Solution([]int{0,0,1,0,1,1}))\n\tfmt.Println(Solution([]int{0,1,0,1,1}))\t\n}\n"
},
{
"alpha_fraction": 0.6094750165939331,
"alphanum_fraction": 0.6581305861473083,
"avg_line_length": 18.524999618530273,
"blob_id": "ad5642b80b5f8a1c0fa24d8366ee0a3a688f88a1",
"content_id": "934a2a8709983ae6a14f41548b612accf4132798",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "C#",
"length_bytes": 783,
"license_type": "no_license",
"max_line_length": 77,
"num_lines": 40,
"path": "/Codility.Training.Tests/EquiLeaderTests.cs",
"repo_name": "istiophorus/Codility",
"src_encoding": "UTF-8",
"text": "using System;\nusing System.Collections.Generic;\nusing System.Linq;\nusing System.Text;\nusing System.Threading.Tasks;\nusing Microsoft.VisualStudio.TestTools.UnitTesting;\n\nnamespace Codility.Training.Tests\n{\n\t[TestClass]\n\tpublic sealed class EquiLeaderTests\n\t{\n\t\tprivate readonly EquiLeader _target = new EquiLeader();\n\n\t\t[TestMethod]\n\t\tpublic void Test01()\n\t\t{\n\t\t\tAssert.AreEqual(2, _target.Solve(new Int32[] { 4, 3, 4, 4, 4, 2 }));\n\t\t}\n\n\t\t[TestMethod]\n\t\tpublic void Test02()\n\t\t{\n\t\t\tAssert.AreEqual(1, _target.Solve(new Int32[] { 4, 4 }));\n\t\t}\n\n\t\t[TestMethod]\n\t\tpublic void Test03()\n\t\t{\n\t\t\tAssert.AreEqual(0, _target.Solve(new Int32[] { 1, 4 }));\n\t\t}\n\n\t\t[TestMethod]\n\t\tpublic void Test04()\n\t\t{\n\t\t\tAssert.AreEqual(3, _target.Solve(new Int32[] { 4, 4, 2, 5, 3, 4, 4, 4 }));\n\t\t}\n\n\t}\n}\n"
},
{
"alpha_fraction": 0.6029411554336548,
"alphanum_fraction": 0.6649159789085388,
"avg_line_length": 21.023256301879883,
"blob_id": "09356bf25e19e8c51eb3db21a27ea23440c93d2e",
"content_id": "59ef54faa222cdff8328dde31e98a8a33a9b3f4a",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "C#",
"length_bytes": 954,
"license_type": "no_license",
"max_line_length": 53,
"num_lines": 43,
"path": "/Codility.Training.Tests/CountDivTests.cs",
"repo_name": "istiophorus/Codility",
"src_encoding": "UTF-8",
"text": "using System;\nusing System.Collections.Generic;\nusing System.Linq;\nusing System.Text;\nusing System.Threading.Tasks;\nusing Microsoft.VisualStudio.TestTools.UnitTesting;\n\nnamespace Codility.Training.Tests\n{\n\t[TestClass]\n\tpublic sealed class CountDivTests\n\t{\n\t\tprivate readonly CountDiv _target = new CountDiv();\n\n\t\t[TestMethod]\n\t\tpublic void Test01()\n\t\t{\n\t\t\tAssert.AreEqual(1, _target.Solve(5, 6, 5));\n\n\t\t\tAssert.AreEqual(1, _target.Solve(5, 7, 5));\n\n\t\t\tAssert.AreEqual(3, _target.Solve(6, 11, 2));\n\n\t\t\tAssert.AreEqual(1, _target.Solve(4, 6, 3));\n\n\t\t\tAssert.AreEqual(3, _target.Solve(6, 13, 3));\n\n\t\t\tAssert.AreEqual(2, _target.Solve(1, 6, 3));\n\n\t\t\tAssert.AreEqual(1, _target.Solve(6, 14, 5));\n\n\t\t\tAssert.AreEqual(2, _target.Solve(6, 15, 5));\n\n\t\t\tAssert.AreEqual(2, _target.Solve(6, 19, 5));\n\n\t\t\tAssert.AreEqual(2, _target.Solve(7, 15, 5));\n\n\t\t\tAssert.AreEqual(1, _target.Solve(0, 1, 11));\n\n\t\t\tAssert.AreEqual(1, _target.Solve(10, 10, 5));\n\t\t}\n\t}\n}\n\n\n\n\n\n"
},
{
"alpha_fraction": 0.5819672346115112,
"alphanum_fraction": 0.6029143929481506,
"avg_line_length": 16.15625,
"blob_id": "fb8d0210e6547248010e070f5d9274ec72e1341f",
"content_id": "be9e5d0ef19eb087b1cc9ab7e506e5dd58d2a3c0",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "C#",
"length_bytes": 1100,
"license_type": "no_license",
"max_line_length": 63,
"num_lines": 64,
"path": "/Codility.Training/MissingInteger.cs",
"repo_name": "istiophorus/Codility",
"src_encoding": "UTF-8",
"text": "using System;\nusing System.Collections.Generic;\nusing System.Linq;\nusing System.Text;\nusing System.Threading.Tasks;\n\nnamespace Codility.Training\n{\n\t/// <summary>\n\t/// solution of Codility problem:\n\t/// https://codility.com/demo/take-sample-test/missing_integer\n\t/// and my solution:\n\t/// https://codility.com/demo/results/demoCJS5D6-K9G/\n\t/// </summary>\n\tpublic sealed class MissingInteger\n\t{\n\t\tpublic Int32 Solve(Int32[] input)\n\t\t{\n\t\t\tif (null == input)\n\t\t\t{\n\t\t\t\tthrow new ArgumentNullException(\"input\");\n\t\t\t}\n\n\t\t\tif (input.Length <= 0)\n\t\t\t{\n\t\t\t\treturn 1;\n\t\t\t}\n\n\t\t\tHashSet<Int32> items = new HashSet<Int32>();\n\n\t\t\tBoolean allNonpositive = true;\n\n\t\t\tfor (Int32 q = 0; q < input.Length; q++)\n\t\t\t{\n\t\t\t\tInt32 currentItem = input[q];\n\n\t\t\t\tif (currentItem > 0)\n\t\t\t\t{\n\t\t\t\t\tallNonpositive = false;\n\n\t\t\t\t\tif (!items.Contains(currentItem))\n\t\t\t\t\t{\n\t\t\t\t\t\titems.Add(currentItem);\n\t\t\t\t\t}\n\t\t\t\t}\n\t\t\t}\n\n\t\t\tif (allNonpositive)\n\t\t\t{\n\t\t\t\treturn 1;\n\t\t\t}\n\n\t\t\tfor (Int32 q = 1; ; q++)\n\t\t\t{\n\t\t\t\tif (!items.Contains(q))\n\t\t\t\t{\n\t\t\t\t\treturn q;\n\t\t\t\t}\n\t\t\t}\n\n\t\t\tthrow new ApplicationException(\"How did it happen ??\");\n\t\t}\n\t}\n}\n"
},
{
"alpha_fraction": 0.6214953064918518,
"alphanum_fraction": 0.6600467562675476,
"avg_line_length": 18.0222225189209,
"blob_id": "3dbc7a0c09b8015419c5e71e11ad988831e10946",
"content_id": "5cf1f6d65beb1e435733cb446ecaf5871c993ba3",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "C#",
"length_bytes": 858,
"license_type": "no_license",
"max_line_length": 62,
"num_lines": 45,
"path": "/Codility.Training.Tests/PermCheckTest.cs",
"repo_name": "istiophorus/Codility",
"src_encoding": "UTF-8",
"text": "using System;\nusing System.Collections.Generic;\nusing System.Linq;\nusing System.Text;\nusing System.Threading.Tasks;\nusing Microsoft.VisualStudio.TestTools.UnitTesting;\n\nnamespace Codility.Training.Tests\n{\n\t[TestClass]\n\tpublic sealed class PermCheckTest\n\t{\n\t\tprivate readonly PermCheck _target = new PermCheck();\n\n\t\t[TestMethod]\n\t\tpublic void TestEmpty()\n\t\t{\n\t\t\tAssert.AreEqual(0, _target.Solve(new Int32[] { }));\n\t\t}\n\n\t\t[TestMethod]\n\t\tpublic void Test01()\n\t\t{\n\t\t\tAssert.AreEqual(0, _target.Solve(new Int32[] { 1,3 }));\n\t\t}\n\n\t\t[TestMethod]\n\t\tpublic void Test02()\n\t\t{\n\t\t\tAssert.AreEqual(1, _target.Solve(new Int32[] { 2, 1 }));\n\t\t}\n\n\t\t[TestMethod]\n\t\tpublic void Test03()\n\t\t{\n\t\t\tAssert.AreEqual(1, _target.Solve(new Int32[] { 2, 1, 3 }));\n\t\t}\n\n\t\t[TestMethod]\n\t\tpublic void Test04()\n\t\t{\n\t\t\tAssert.AreEqual(0, _target.Solve(new Int32[] { 2, 1, 4 }));\n\t\t}\n\t}\n}\n"
},
{
"alpha_fraction": 0.5077793598175049,
"alphanum_fraction": 0.5332390666007996,
"avg_line_length": 22.600000381469727,
"blob_id": "5c6187150c0ac12f072020a0d306caa6f7394b54",
"content_id": "f8212a39146e5cf43bfb36dc132e880c0c76a4c9",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 707,
"license_type": "no_license",
"max_line_length": 98,
"num_lines": 30,
"path": "/Python/CountFactors.py",
"repo_name": "istiophorus/Codility",
"src_encoding": "UTF-8",
"text": "# task: https://app.codility.com/programmers/lessons/10-prime_and_composite_numbers/count_factors/\n# solution: https://app.codility.com/demo/results/trainingW3A2V4-9UQ/\n\nimport math\nimport collections\n\ndef solution(input: int) -> int:\n if input < 0:\n raise ValueError\n \n if input < 2:\n return 1\n \n if input < 4:\n return 2\n \n counter = 0\n \n for ix in range(2, math.floor(math.sqrt(input)) + 1):\n v = input / ix\n \n if math.floor(v) == v:\n if ix != v:\n counter = counter + 2\n else:\n counter = counter + 1\n \n counter = counter + 2 # 1 and input\n \n return counter"
},
{
"alpha_fraction": 0.5164670944213867,
"alphanum_fraction": 0.5329341292381287,
"avg_line_length": 21.86206817626953,
"blob_id": "5c5d63789f843623ddb99f862540915092c81f44",
"content_id": "3b291f300e833448504c406829caaa08a306b85a",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 1336,
"license_type": "no_license",
"max_line_length": 74,
"num_lines": 58,
"path": "/Python/EquiLeaders.py",
"repo_name": "istiophorus/Codility",
"src_encoding": "UTF-8",
"text": "# Task: https://app.codility.com/programmers/lessons/8-leader/equi_leader/\n# Solution: https://app.codility.com/demo/results/trainingJWUPP9-QG5/\n\nfrom collections import defaultdict\n\ndef Leaders(arr):\n if (arr == None):\n raise ValueError\n\n arrLen = len(arr)\n \n leaders = defaultdict(int)\n \n itemCounters = defaultdict(int)\n \n currentMax = -1\n currentMaxCounter = 0\n \n for (ix, x) in enumerate(arr):\n itemCounter = itemCounters[x] + 1\n itemCounters[x] = itemCounter\n \n if (itemCounter > currentMaxCounter):\n currentMax = x\n currentMaxCounter = itemCounter\n \n if (currentMaxCounter > ((ix + 1) // 2)):\n leaders[ix] = currentMax\n \n return leaders\n \n\ndef solution(arr):\n if (arr == None):\n raise ValueError\n \n arrLen = len(arr)\n if (arrLen < 2):\n return 0\n \n leaders1 = Leaders(arr)\n leaders2 = Leaders(arr[::-1])\n \n equiLeaders = 0\n \n for x in range(arrLen - 1):\n if x not in leaders1:\n continue\n \n revX = (arrLen - 1) - x - 1\n \n if revX not in leaders2:\n continue\n \n if leaders1[x] == leaders2[revX]:\n equiLeaders += 1\n \n return equiLeaders\n \n \n"
},
{
"alpha_fraction": 0.5161048769950867,
"alphanum_fraction": 0.5258427262306213,
"avg_line_length": 16.115385055541992,
"blob_id": "1afb37c2c32242b446f761e37025fe052e046167",
"content_id": "7f5c1ac95f39663060f055798c53ca0882427ec7",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "C#",
"length_bytes": 1337,
"license_type": "no_license",
"max_line_length": 64,
"num_lines": 78,
"path": "/Codility.Training/Brackets.cs",
"repo_name": "istiophorus/Codility",
"src_encoding": "UTF-8",
"text": "using System;\nusing System.Collections.Generic;\nusing System.Linq;\nusing System.Text;\nusing System.Threading.Tasks;\n\nnamespace Codility.Training\n{\n\t/// <summary>\n\t/// my solution of codility Brackets task\n\t/// https://codility.com/demo/take-sample-test/brackets\n\t/// with results\n\t/// https://codility.com/demo/results/demoBPMYF4-ZVZ/\n\t/// </summary>\n\tpublic sealed class Brackets\n\t{\n\t\tpublic Int32 Solve(String input)\n\t\t{\n\t\t\tif (null == input)\n\t\t\t{\n\t\t\t\tthrow new ArgumentNullException(\"input\");\n\t\t\t}\n\n\t\t\tif (String.IsNullOrWhiteSpace(input))\n\t\t\t{\n\t\t\t\treturn 1;\n\t\t\t}\n\n\t\t\tChar[] chars = input.ToCharArray();\n\n\t\t\tStack<Char> stack = new Stack<Char>();\n\n\t\t\tDictionary<Char, Char> map = new Dictionary<Char, Char>()\n\t\t\t{\n\t\t\t\t{'{', '}'},\n\t\t\t\t{'[', ']'},\n\t\t\t\t{'(', ')'}\n\t\t\t};\n\n\t\t\tfor (Int32 q = 0; q < chars.Length; q++)\n\t\t\t{\n\t\t\t\tChar current = chars[q];\n\n\t\t\t\tif (current == '{' || current == '[' || current == '(')\n\t\t\t\t{\n\t\t\t\t\tstack.Push(current);\n\t\t\t\t}\n\t\t\t\telse if (current == '}' || current == ']' || current == ')')\n\t\t\t\t{\n\t\t\t\t\tif (stack.Count >= 1)\n\t\t\t\t\t{\n\t\t\t\t\t\tChar topItem = stack.Peek();\n\n\t\t\t\t\t\tif (current == map[topItem])\n\t\t\t\t\t\t{\n\t\t\t\t\t\t\tstack.Pop();\n\t\t\t\t\t\t}\n\t\t\t\t\t\telse\n\t\t\t\t\t\t{\n\t\t\t\t\t\t\treturn 0;\n\t\t\t\t\t\t}\n\t\t\t\t\t}\n\t\t\t\t\telse\n\t\t\t\t\t{\n\t\t\t\t\t\treturn 0;\n\t\t\t\t\t}\n\t\t\t\t}\n\t\t\t}\n\n\t\t\tif (stack.Count > 0)\n\t\t\t{\n\t\t\t\treturn 0;\n\t\t\t}\n\n\t\t\treturn 1;\n\t\t}\n\t}\n}\n"
},
{
"alpha_fraction": 0.5966981053352356,
"alphanum_fraction": 0.6297169923782349,
"avg_line_length": 15.960000038146973,
"blob_id": "e534f9f4221ec0c890b12e48e2e521e655ce6cec",
"content_id": "28c9058e45f81680cdf2ccb60288102839d37f34",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "C#",
"length_bytes": 1274,
"license_type": "no_license",
"max_line_length": 70,
"num_lines": 75,
"path": "/Codility.Training/Dominator.cs",
"repo_name": "istiophorus/Codility",
"src_encoding": "UTF-8",
"text": "using System;\nusing System.Collections.Generic;\nusing System.Linq;\nusing System.Text;\nusing System.Threading.Tasks;\n\nnamespace Codility.Training\n{\n\t/// <summary>\n\t/// my solution of the task Dominator\n\t/// https://codility.com/demo/take-sample-test/dominator\n\t/// and here are results\n\t/// https://codility.com/demo/results/demoCJF6R7-673/\n\t/// </summary>\n\tpublic sealed class Dominator\n\t{\n\t\tpublic Int32 Solve(Int32[] input)\n\t\t{\n\t\t\tif (null == input)\n\t\t\t{\n\t\t\t\tthrow new ArgumentNullException(\"input\");\n\t\t\t}\n\n\t\t\tif (input.Length <= 0)\n\t\t\t{\n\t\t\t\treturn -1;\n\t\t\t}\n\n\t\t\tif (input.Length == 1)\n\t\t\t{\n\t\t\t\treturn 0;\n\t\t\t}\n\n\t\t\tDictionary<Int32, Int32> counters = new Dictionary<Int32, Int32>();\n\n\t\t\tInt32 currentMaxCount = 0;\n\n\t\t\tInt32 currentMaxItem = 0;\n\n\t\t\tInt32 lastMaxIndex = -1;\n\n\t\t\tfor (Int32 q = 0; q < input.Length; q++)\n\t\t\t{\n\t\t\t\tInt32 current = input[q];\n\n\t\t\t\tInt32 itemCounter = 0;\n\n\t\t\t\tcounters.TryGetValue(current, out itemCounter);\n\n\t\t\t\titemCounter++;\n\n\t\t\t\tcounters[current] = itemCounter;\n\n\t\t\t\tif (itemCounter > currentMaxCount)\n\t\t\t\t{\n\t\t\t\t\tcurrentMaxCount = itemCounter;\n\n\t\t\t\t\tcurrentMaxItem = current;\n\n\t\t\t\t\tlastMaxIndex = q;\n\t\t\t\t}\n\t\t\t}\n\n\t\t\tInt32 half = input.Length / 2;\n\n\t\t\tif (currentMaxCount > half)\n\t\t\t{\n\t\t\t\treturn lastMaxIndex;\n\t\t\t}\n\t\t\t\n\n\t\t\treturn -1;\n\t\t}\n\t}\n}\n"
},
{
"alpha_fraction": 0.5585970878601074,
"alphanum_fraction": 0.6347305178642273,
"avg_line_length": 21.921567916870117,
"blob_id": "95f5468beb5774ef128c2100668ed0d5620c6a5d",
"content_id": "179aad8c5b616001e2c228996622b8fb1d45c48d",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "C#",
"length_bytes": 1171,
"license_type": "no_license",
"max_line_length": 129,
"num_lines": 51,
"path": "/Codility.Training.Tests/CountSemiprimesTest.cs",
"repo_name": "istiophorus/Codility",
"src_encoding": "UTF-8",
"text": "using System;\nusing System.Collections.Generic;\nusing System.Linq;\nusing System.Text;\nusing System.Threading.Tasks;\nusing Microsoft.VisualStudio.TestTools.UnitTesting;\n\nnamespace Codility.Training.Tests\n{\n\t[TestClass]\n\tpublic sealed class CountSemiprimesTest\n\t{\n\t\t[TestMethod]\n\t\tpublic void TestEmpty()\n\t\t{\n\t\t\tInt32[] result = new CountSemiprimes().Solve(30, new Int32[0], new Int32[0]);\n\n\t\t\tAssert.AreEqual(0, result.Length);\n\t\t}\n\n\t\t[TestMethod]\n\t\tpublic void TestExample01()\n\t\t{\n\t\t\tInt32[] result = new CountSemiprimes().Solve(30, new Int32[] { 0, 4, 11, 11, 11, 14 }, new Int32[] { 30, 9, 16, 23, 25, 25 });\n\n\t\t\tInt32[] expected = new Int32[] { 10, 3, 2, 4, 5, 5 };\n\n\t\t\tAssert.AreEqual(6, result.Length);\n\n\t\t\tfor (Int32 q = 0; q < result.Length; q++)\n\t\t\t{\n\t\t\t\tAssert.AreEqual(expected[q], result[q]);\n\t\t\t}\n\t\t}\n\n\t\t[TestMethod]\n\t\tpublic void TestExample02()\n\t\t{\n\t\t\tInt32[] result = new CountSemiprimes().Solve(26, new Int32[] { 1, 4, 16 }, new Int32[] { 26, 10, 20 });\n\n\t\t\tInt32[] expected = new Int32[] { 10, 4, 0 };\n\n\t\t\tAssert.AreEqual(3, result.Length);\n\n\t\t\tfor (Int32 q = 0; q < result.Length; q++)\n\t\t\t{\n\t\t\t\tAssert.AreEqual(expected[q], result[q]);\n\t\t\t}\n\t\t}\n\t}\n}\n"
},
{
"alpha_fraction": 0.6557376980781555,
"alphanum_fraction": 0.6721311211585999,
"avg_line_length": 23.600000381469727,
"blob_id": "8d5873f1588ed8b87a6f5d2cd0601a1cf3994a33",
"content_id": "d14fa94b877c0fd572a91a69d25f4b1f8407cf56",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 122,
"license_type": "no_license",
"max_line_length": 59,
"num_lines": 5,
"path": "/Python/Distinct.py",
"repo_name": "istiophorus/Codility",
"src_encoding": "UTF-8",
"text": "# https://app.codility.com/demo/results/trainingD6NGDZ-8RQ/\n\ndef Distinct(arr):\n m = {x for x in arr}\n return len(m)"
},
{
"alpha_fraction": 0.6642547249794006,
"alphanum_fraction": 0.6743849515914917,
"avg_line_length": 19.878787994384766,
"blob_id": "32fcb45ed735accd6893424e85db0bf7899048a9",
"content_id": "26d21a2e85d4d460a499059dbb75bf40247feee8",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "C#",
"length_bytes": 693,
"license_type": "no_license",
"max_line_length": 53,
"num_lines": 33,
"path": "/Codility.Training.Tests/BracketsTest.cs",
"repo_name": "istiophorus/Codility",
"src_encoding": "UTF-8",
"text": "using System;\nusing System.Collections.Generic;\nusing System.Linq;\nusing System.Text;\nusing System.Threading.Tasks;\nusing Microsoft.VisualStudio.TestTools.UnitTesting;\n\nnamespace Codility.Training.Tests\n{\n\t[TestClass]\n\tpublic sealed class BracketsTest\n\t{\n\t\tprivate readonly Brackets _target = new Brackets();\n\n\t\t[TestMethod]\n\t\tpublic void TestParentheses()\n\t\t{\n\t\t\tAssert.AreEqual(1, _target.Solve(\"{}\"));\n\n\t\t\tAssert.AreEqual(1, _target.Solve(\"[]\"));\n\n\t\t\tAssert.AreEqual(1, _target.Solve(\"()\"));\n\n\t\t\tAssert.AreEqual(1, _target.Solve(\"([])\"));\n\n\t\t\tAssert.AreEqual(0, _target.Solve(\"([)]\"));\n\n\t\t\tAssert.AreEqual(0, _target.Solve(\"(\"));\n\n\t\t\tAssert.AreEqual(0, _target.Solve(\"[](\"));\n\t\t}\n\t}\n}\n\n\n"
},
{
"alpha_fraction": 0.6467686295509338,
"alphanum_fraction": 0.6901825070381165,
"avg_line_length": 17.098215103149414,
"blob_id": "6c717dfcac455402abb576b4fe3782bdd165e79a",
"content_id": "336fc9ecad9123d48fc5965124e0eeccc81d09dd",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "C#",
"length_bytes": 2029,
"license_type": "no_license",
"max_line_length": 75,
"num_lines": 112,
"path": "/Codility.Training.Tests/ChocolatesByNumbersTests.cs",
"repo_name": "istiophorus/Codility",
"src_encoding": "UTF-8",
"text": "using System;\nusing System.Collections.Generic;\nusing System.Linq;\nusing System.Text;\nusing System.Threading.Tasks;\nusing Microsoft.VisualStudio.TestTools.UnitTesting;\n\nnamespace Codility.Training.Tests\n{\n\t[TestClass]\n\tpublic sealed class ChocolatesByNumbersTests\n\t{\n\t\tprivate readonly ChocolatesByNumbers _target = new ChocolatesByNumbers();\n\n\t\t[TestMethod]\n\t\tpublic void SampleChockolateTest()\n\t\t{\n\t\t\tInt32 result = _target.Solve(10, 4);\n\n\t\t\tAssert.AreEqual(5, result);\n\t\t}\n\n\t\t[TestMethod]\n\t\tpublic void ChockolateTest06()\n\t\t{\n\t\t\tInt32 result = _target.Solve(9, 4);\n\n\t\t\tAssert.AreEqual(9, result);\n\t\t}\n\n\t\t[TestMethod]\n\t\tpublic void ChockolateTest07()\n\t\t{\n\t\t\tInt32 result = _target.Solve(10, 12);\n\n\t\t\tAssert.AreEqual(5, result);\n\t\t}\n\n\t\t[TestMethod]\n\t\tpublic void ChockolateTest09()\n\t\t{\n\t\t\tInt32 result = _target.Solve(1, 2);\n\n\t\t\tAssert.AreEqual(1, result);\n\t\t}\n\n\t\t[TestMethod]\n\t\tpublic void ChockolateTest01()\n\t\t{\n\t\t\tInt32 result = _target.Solve(6, 3);\n\n\t\t\tAssert.AreEqual(2, result);\n\t\t}\n\n\t\t[TestMethod]\n\t\tpublic void ChockolateTest02()\n\t\t{\n\t\t\tInt32 result = _target.Solve(6, 1);\n\n\t\t\tAssert.AreEqual(6, result);\n\t\t}\n\n\t\t[TestMethod]\n\t\tpublic void ChockolateTest03()\n\t\t{\n\t\t\tInt32 result = _target.Solve(6, 6);\n\n\t\t\tAssert.AreEqual(1, result);\n\t\t}\n\n\t\t[TestMethod]\n\t\tpublic void ChockolateTest04()\n\t\t{\n\t\t\tInt32 result = _target.Solve(6, 7);\n\n\t\t\tAssert.AreEqual(6, result);\n\t\t}\n\n\t\t[TestMethod]\n\t\tpublic void ChockolateTest05()\n\t\t{\n\t\t\tInt32 result = _target.Solve(6, 8);\n\n\t\t\tAssert.AreEqual(3, result);\n\t\t}\n\n\t\t[TestMethod]\n\t\tpublic void LargestCommonDivisorTest01()\n\t\t{\n\t\t\tInt64 result = ChocolatesByNumbers.LargestCommonDivisor(8, 6);\n\n\t\t\tAssert.AreEqual(2, result);\n\t\t}\n\n\t\t[TestMethod]\n\t\tpublic void LeastCommonMultiplicationTest01()\n\t\t{\n\t\t\tInt64 result = ChocolatesByNumbers.LeastCommonMultiplication(2, 3);\n\n\t\t\tAssert.AreEqual(6, result);\n\t\t}\n\n\t\t[TestMethod]\n\t\tpublic void LeastCommonMultiplicationTest02()\n\t\t{\n\t\t\tInt64 result = ChocolatesByNumbers.LeastCommonMultiplication(15, 10);\n\n\t\t\tAssert.AreEqual(30, result);\n\t\t}\n\n\t}\n}\n"
},
{
"alpha_fraction": 0.48136141896247864,
"alphanum_fraction": 0.5121555924415588,
"avg_line_length": 22.769229888916016,
"blob_id": "f0a72595ab18686a175683b6845d1c0c0ad3a7b5",
"content_id": "feafdd8370ccaa55de0c61070060b7ead75177c9",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 617,
"license_type": "no_license",
"max_line_length": 59,
"num_lines": 26,
"path": "/Python/BinaryGap.py",
"repo_name": "istiophorus/Codility",
"src_encoding": "UTF-8",
"text": "import sys\n\n# https://app.codility.com/demo/results/training5ARS34-NED/\ndef binary_gap(input: int) -> int:\n if input < 1 or input > sys.maxsize:\n raise ValueError\n \n x = input\n current_count = 0\n max_gap = 0\n count_gap = False\n while x != 0:\n bit = x & 1\n if bit == 0 and count_gap:\n current_count = current_count + 1\n if bit == 1:\n count_gap = True\n if current_count > max_gap:\n max_gap = current_count\n current_count = 0\n \n x = x >> 1\n\n return max_gap\n\n#print(binary_gap(561892))"
},
{
"alpha_fraction": 0.4830188751220703,
"alphanum_fraction": 0.5094339847564697,
"avg_line_length": 19.384614944458008,
"blob_id": "511907c94d0bcacdcc57b65a2d7b93e8ecc71728",
"content_id": "fed4853adee6415e60825cb49be2c1103e0303f0",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 265,
"license_type": "no_license",
"max_line_length": 59,
"num_lines": 13,
"path": "/Python/Triangles.py",
"repo_name": "istiophorus/Codility",
"src_encoding": "UTF-8",
"text": "\n# https://app.codility.com/demo/results/trainingBGZ5KN-W9Z/\n\ndef solution(arr):\n arr.sort()\n le = len(arr) - 2\n for ix in range(le):\n if arr[ix] + arr[ix + 1] > arr[ix + 2]:\n return 1\n\n if ix == le:\n break\n\n return 0"
},
{
"alpha_fraction": 0.5318725109100342,
"alphanum_fraction": 0.5657370686531067,
"avg_line_length": 21.863636016845703,
"blob_id": "c11b82c4bb2f230db6759e88205cdd2bc18e6a41",
"content_id": "f67856d28c7134709431bd2aad6af7ee5323c427",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 502,
"license_type": "no_license",
"max_line_length": 108,
"num_lines": 22,
"path": "/Python/MinPerimeterRectangle.py",
"repo_name": "istiophorus/Codility",
"src_encoding": "UTF-8",
"text": "import math\n\n# task: https://app.codility.com/programmers/lessons/10-prime_and_composite_numbers/min_perimeter_rectangle/\n# solution: https://app.codility.com/demo/results/training3DNWVX-97Z/\n\ndef solution(input: int) -> int:\n if input < 1:\n raise ValueError\n\n x = math.sqrt(input)\n \n v1 = math.ceil(x)\n \n while True:\n v2 = input / v1\n \n if v2 == math.ceil(v2):\n break\n \n v1 = v1 - 1\n \n return math.ceil(2 * (v1 + v2))"
},
{
"alpha_fraction": 0.4173228442668915,
"alphanum_fraction": 0.4261811077594757,
"avg_line_length": 24.846153259277344,
"blob_id": "815a7a55d34b00ca1fdb7b934b77d8aa1910e55c",
"content_id": "870e60820a40c635121d907b0fd4b5bca9d81d3e",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 1016,
"license_type": "no_license",
"max_line_length": 98,
"num_lines": 39,
"path": "/Python/Brackets.py",
"repo_name": "istiophorus/Codility",
"src_encoding": "UTF-8",
"text": "# Brackets\n# Task: https://app.codility.com/programmers/lessons/7-stacks_and_queues/brackets/\n# Solution and results: https://app.codility.com/demo/results/training9RBUZY-DVN/\n\ndef solution(input):\n if input == None:\n raise ValueError\n \n if input == '':\n return 1\n \n stack = []\n \n for ch in input:\n if ch == '(':\n stack.append('(')\n elif ch == ')':\n last = stack.pop()\n if last != '(':\n return 0\n elif ch == '[':\n stack.append('[')\n elif ch == ']':\n last = stack.pop()\n if last != '[':\n return 0\n elif ch == '{':\n stack.append('{')\n elif ch == '}':\n last = stack.pop()\n if last != '{':\n return 0\n else:\n raise ValueError(\"Input string contains invalid characters\")\n \n if len(stack) == 0:\n return 1\n \n return 0\n "
},
{
"alpha_fraction": 0.5388813018798828,
"alphanum_fraction": 0.5648021697998047,
"avg_line_length": 20.58823585510254,
"blob_id": "5b65f5bee7d88232af29fe808e48585a6f473226",
"content_id": "13a37bf41125a8eec8042e6a9b2cac33d1bed882",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 733,
"license_type": "no_license",
"max_line_length": 58,
"num_lines": 34,
"path": "/Python/TapeEquilibrium.py",
"repo_name": "istiophorus/Codility",
"src_encoding": "UTF-8",
"text": "import math\n\n#https://app.codility.com/demo/results/trainingSMJ6AD-ZBK/\ndef tape_equilibrium(arr):\n if len(arr) < 2:\n raise ValueError\n\n temp = []\n current_sum = 0\n current_abs_sum = 0\n\n for x in arr:\n current_sum += x\n current_abs_sum += abs(x)\n temp.append(current_sum)\n\n del temp[len(temp) - 1]\n\n min_diff = current_abs_sum + 1\n min_diff_index = -1\n ix = 1\n\n for s in temp:\n current_diff = abs((current_sum - s) - s)\n if (current_diff < min_diff):\n min_diff = current_diff\n min_diff_index = ix\n ix += 1\n\n return min_diff\n\n#print(tape_equilibrium([-1,3]))\n#print(tape_equilibrium([1,2,3]))\n#print(tape_equilibrium([3,1,2,4,3]))"
},
{
"alpha_fraction": 0.5783348083496094,
"alphanum_fraction": 0.6087735295295715,
"avg_line_length": 17.016128540039062,
"blob_id": "b443eef948ebb601b12de709478d028a54ba4e93",
"content_id": "33db015e5d57f34cf9f03dd74553fb8a52c174b4",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "C#",
"length_bytes": 1119,
"license_type": "no_license",
"max_line_length": 64,
"num_lines": 62,
"path": "/Codility.Training/TapeEquilibrium.cs",
"repo_name": "istiophorus/Codility",
"src_encoding": "UTF-8",
"text": "using System;\nusing System.Collections.Generic;\nusing System.Linq;\nusing System.Text;\nusing System.Threading.Tasks;\n\nnamespace Codility.Training\n{\n\t/// <summary>\n\t/// solution of the codility training task\n\t/// https://codility.com/demo/take-sample-test/tape_equilibrium\n\t/// and here are my results:\n\t/// https://codility.com/demo/results/demoPG98CP-UQA/\n\t/// </summary>\n\tpublic sealed class TapeEquilibrium\n {\n\t\tpublic Int32 Solve(Int32[] input)\n\t\t{\n\t\t\tif (null == input)\n\t\t\t{\n\t\t\t\tthrow new ArgumentNullException(\"input\");\n\t\t\t}\n\n\t\t\tif (input.Length <= 0)\n\t\t\t{\n\t\t\t\treturn 0;\n\t\t\t}\n\n\t\t\tif (input.Length == 1)\n\t\t\t{\n\t\t\t\treturn input[0];\n\t\t\t}\n\n\t\t\tList<Int64> sums = new List<Int64>();\n\n\t\t\tInt64 sumOfAll = 0;\n\n\t\t\tfor (Int32 q = 0; q < input.Length - 1; q++)\n\t\t\t{\n\t\t\t\tsumOfAll += input[q];\n\n\t\t\t\tsums.Add(sumOfAll);\n\t\t\t}\n\n\t\t\tsumOfAll += input[input.Length - 1];\n\n\t\t\tInt64 minSum = Int32.MaxValue;\n\n\t\t\tfor (Int32 q = 0; q < sums.Count; q++)\n\t\t\t{\n\t\t\t\tInt64 currentValue = Math.Abs(2 * sums[q] - sumOfAll);\n\n\t\t\t\tif (currentValue < minSum)\n\t\t\t\t{\n\t\t\t\t\tminSum = currentValue;\n\t\t\t\t}\n\t\t\t}\n\n\t\t\treturn (Int32)minSum;\n\t\t}\n }\n}\n"
},
{
"alpha_fraction": 0.598998486995697,
"alphanum_fraction": 0.6201848983764648,
"avg_line_length": 19.44094467163086,
"blob_id": "7799779dfbed26fe5328dc7607e04b6c26d08fc0",
"content_id": "883d2b99c495676d69f342b887606634b5aaf328",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "C#",
"length_bytes": 2598,
"license_type": "no_license",
"max_line_length": 82,
"num_lines": 127,
"path": "/Codility.Training/NumberOfInterSectingDisks.cs",
"repo_name": "istiophorus/Codility",
"src_encoding": "UTF-8",
"text": "using System;\nusing System.Collections.Generic;\nusing System.Linq;\nusing System.Text;\nusing System.Threading.Tasks;\n\nnamespace Codility.Training\n{\n\t/// <summary>\n\t/// my solution of Codility task:\n\t/// https://codility.com/demo/take-sample-test/number_of_disc_intersections\n\t/// with my results\n\t/// https://codility.com/demo/results/demoHEK58H-ZXF/\n\t/// </summary>\n\tpublic sealed class NumberOfInterSectingDisks\n\t{\n\t\tprivate class DiscInfo : IComparer<DiscInfo>\n\t\t{\n\t\t\tinternal Int32 StartPoint { get; set; }\n\n\t\t\tinternal Int32 Radius { get; set; }\n\n\t\t\tinternal Int32 EndPoint { get; set; }\n\n\t\t\tinternal Int32 Center { get; set; }\n\n\t\t\tpublic Int32 Compare(DiscInfo x, DiscInfo y)\n\t\t\t{\n\t\t\t\treturn x.EndPoint.CompareTo(y.EndPoint);\n\t\t\t}\n\t\t}\n\n\t\tpublic static Int32 Solve(Int32[] input)\n\t\t{\n\t\t\tif (null == input)\n\t\t\t{\n\t\t\t\tthrow new ArgumentNullException(\"input\");\n\t\t\t}\n\n\t\t\tif (input.Length <= 1)\n\t\t\t{\n\t\t\t\treturn 0;\n\t\t\t}\n\n\t\t\tList<DiscInfo> discs = new List<DiscInfo>(input.Length);\n\n\t\t\tfor (Int32 q = 0; q < input.Length; q++)\n\t\t\t{\n\t\t\t\tInt32 currentRadius = input[q];\n\n\t\t\t\tif (currentRadius < 0)\n\t\t\t\t{\n\t\t\t\t\tthrow new ArgumentException(\"Invalid radius value \" + currentRadius);\n\t\t\t\t}\n\n\t\t\t\tdiscs.Add(new DiscInfo\n\t\t\t\t{\n\t\t\t\t\tStartPoint = q - currentRadius,\n\t\t\t\t\tRadius = currentRadius,\n\t\t\t\t\tEndPoint = q + currentRadius,\n\t\t\t\t\tCenter = q\n\t\t\t\t});\n\t\t\t}\n\n\t\t\tdiscs.Sort((x, y) => x.StartPoint.CompareTo(y.StartPoint));\n\n\t\t\tList<DiscInfo> closingDiscs = new List<DiscInfo>();\n\n\t\t\tHashSet<Int32> openedDiscs = new HashSet<Int32>();\n\n\t\t\tInt32 interSectingDiscs = 0;\n\n\t\t\tIComparer<DiscInfo> comparer = new DiscInfo();\n\n\t\t\tfor (Int32 q = 0, qMax = discs.Count; q < qMax; q++)\n\t\t\t{\n\t\t\t\tDiscInfo currentDisc = discs[q];\n\n\t\t\t\tInt32 currentStartPoint = currentDisc.StartPoint;\n\n\t\t\t\t////////////////////////////////////////////////////////\n\t\t\t\t//// close pending discs\n\t\t\t\t////\n\n\t\t\t\twhile (closingDiscs.Count > 0 && currentStartPoint > closingDiscs[0].EndPoint)\n\t\t\t\t{\n\t\t\t\t\topenedDiscs.Remove(closingDiscs[0].Center);\n\n\t\t\t\t\tclosingDiscs.RemoveAt(0);\n\t\t\t\t}\n\n\t\t\t\tinterSectingDiscs += openedDiscs.Count;\n\n\t\t\t\tif (interSectingDiscs > 10000000)\n\t\t\t\t{\n\t\t\t\t\treturn -1;\n\t\t\t\t}\n\n\t\t\t\topenedDiscs.Add(currentDisc.Center);\n\n\t\t\t\tif (closingDiscs.Count > 0)\n\t\t\t\t{\n\t\t\t\t\tInt32 index = closingDiscs.BinarySearch(currentDisc, comparer);\n\n\t\t\t\t\tInt32 insertIndex;\n\n\t\t\t\t\tif (index >= 0)\n\t\t\t\t\t{\n\t\t\t\t\t\tinsertIndex = index;\n\t\t\t\t\t}\n\t\t\t\t\telse\n\t\t\t\t\t{\n\t\t\t\t\t\tinsertIndex = ~index;\n\t\t\t\t\t}\n\n\t\t\t\t\tclosingDiscs.Insert(insertIndex, currentDisc);\n\t\t\t\t}\n\t\t\t\telse\n\t\t\t\t{\n\t\t\t\t\tclosingDiscs.Add(currentDisc);\n\t\t\t\t}\n\t\t\t}\n\n\t\t\treturn interSectingDiscs;\n\t\t}\n\t}\n}\n"
},
{
"alpha_fraction": 0.5858085751533508,
"alphanum_fraction": 0.6126237511634827,
"avg_line_length": 18.08661460876465,
"blob_id": "93c4deed836ccd2ecb1bf251a381dd353f3b2d3c",
"content_id": "615871c908c80f7bbd68d23660be47f6b9ee528e",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "C#",
"length_bytes": 2426,
"license_type": "no_license",
"max_line_length": 74,
"num_lines": 127,
"path": "/Codility.Training/EquiLeader.cs",
"repo_name": "istiophorus/Codility",
"src_encoding": "UTF-8",
"text": "using System;\nusing System.Collections.Generic;\nusing System.Linq;\nusing System.Text;\nusing System.Threading.Tasks;\n\nnamespace Codility.Training\n{\n\t/// <summary>\n\t/// my solution of the task EquiLeader\n\t/// https://codility.com/demo/take-sample-test/equi_leader\n\t/// and here are results\n\t/// https://codility.com/demo/results/demoN4WJES-YZU/\n\t/// </summary>\n\tpublic sealed class EquiLeader\n\t{\n\t\tpublic Int32 Solve(Int32[] input)\n\t\t{\n\t\t\tif (null == input)\n\t\t\t{\n\t\t\t\tthrow new ArgumentNullException(\"input\");\n\t\t\t}\n\n\t\t\tif (input.Length <= 1)\n\t\t\t{\n\t\t\t\treturn 0;\n\t\t\t}\n\n\t\t\tNullable<Int32>[] equiLeadersA = new Nullable<Int32>[input.Length - 1];\n\n\t\t\tDictionary<Int32, Int32> counters = new Dictionary<Int32, Int32>();\n\n\t\t\tNullable<Int32> currentLeader = null;\n\n\t\t\tInt32 previousLeaderCount = 0;\n\n\t\t\tfor (Int32 q = 0; q < equiLeadersA.Length; q++)\n\t\t\t{\n\t\t\t\tInt32 currentItem = input[q];\n\n\t\t\t\tif (!counters.ContainsKey(currentItem))\n\t\t\t\t{\n\t\t\t\t\tcounters.Add(currentItem, 0);\n\t\t\t\t}\n\n\t\t\t\tInt32 half = (q + 1) / 2;\n\n\t\t\t\tInt32 newValue = counters[currentItem] + 1;\n\n\t\t\t\tcounters[currentItem] = newValue;\n\n\t\t\t\tif (newValue > half)\n\t\t\t\t{\n\t\t\t\t\tcurrentLeader = currentItem;\n\n\t\t\t\t\tpreviousLeaderCount = newValue;\n\t\t\t\t}\n\t\t\t\telse if (currentLeader.HasValue && previousLeaderCount > half)\n\t\t\t\t{\n\t\t\t\t\t///\n\t\t\t\t}\n\t\t\t\telse\n\t\t\t\t{\n\t\t\t\t\tcurrentLeader = null;\n\t\t\t\t}\n\n\t\t\t\tequiLeadersA[q] = currentLeader;\n\t\t\t}\n\n\t\t\tNullable<Int32>[] equiLeadersB = new Nullable<Int32>[input.Length - 1];\n\n\t\t\tpreviousLeaderCount = 0;\n\n\t\t\tcounters.Clear();\n\n\t\t\tcurrentLeader = null;\n\n\t\t\tfor (Int32 q = equiLeadersB.Length - 1; q >= 0; q--)\n\t\t\t{\n\t\t\t\tInt32 currentItem = input[q + 1];\n\n\t\t\t\tif (!counters.ContainsKey(currentItem))\n\t\t\t\t{\n\t\t\t\t\tcounters.Add(currentItem, 0);\n\t\t\t\t}\n\n\t\t\t\tInt32 half = (input.Length - q - 1) / 2;\n\n\t\t\t\tInt32 newValue = counters[currentItem] + 1;\n\n\t\t\t\tcounters[currentItem] = newValue;\n\n\t\t\t\tif (newValue > half)\n\t\t\t\t{\n\t\t\t\t\tcurrentLeader = currentItem;\n\n\t\t\t\t\tpreviousLeaderCount = newValue;\n\t\t\t\t}\n\t\t\t\telse if (currentLeader.HasValue && previousLeaderCount > half)\n\t\t\t\t{\n\t\t\t\t\t///\n\t\t\t\t}\n\t\t\t\telse\n\t\t\t\t{\n\t\t\t\t\tcurrentLeader = null;\n\t\t\t\t}\n\n\t\t\t\tequiLeadersB[q] = currentLeader;\n\t\t\t}\n\n\t\t\tInt32 counter = 0;\n\n\t\t\tfor (Int32 q = 0; q < equiLeadersA.Length; q++)\n\t\t\t{\n\t\t\t\tif (equiLeadersA[q].HasValue && equiLeadersB[q].HasValue)\n\t\t\t\t{\n\t\t\t\t\tif (equiLeadersA[q].Value == equiLeadersB[q].Value)\n\t\t\t\t\t{\n\t\t\t\t\t\tcounter++;\n\t\t\t\t\t}\n\t\t\t\t}\n\t\t\t}\n\n\t\t\treturn counter;\n\t\t}\n\t}\n}\n"
},
{
"alpha_fraction": 0.5634379386901855,
"alphanum_fraction": 0.5825375318527222,
"avg_line_length": 15.288888931274414,
"blob_id": "01472fa6c608bda7c5ee66f8951b02c959e34563",
"content_id": "f7ee295d5271c2879665b0e59984835b44e0ce1e",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "C#",
"length_bytes": 1468,
"license_type": "no_license",
"max_line_length": 61,
"num_lines": 90,
"path": "/Codility.Training/MaxSliceSum.cs",
"repo_name": "istiophorus/Codility",
"src_encoding": "UTF-8",
"text": "using System;\nusing System.Collections.Generic;\nusing System.Linq;\nusing System.Text;\nusing System.Threading.Tasks;\n\nnamespace Codility.Training\n{\n\t/// <summary>\n\t/// my solution of the task MaxSliceSum\n\t/// https://codility.com/demo/take-sample-test/max_slice_sum\n\t/// and here are results\n\t/// https://codility.com/demo/results/demoKGBZ9U-G3Z/\n\t/// </summary>\n\tpublic sealed class MaxSliceSum\n\t{\n\t\tpublic Int32 Solve(Int32[] input)\n\t\t{\n\t\t\tif (null == input)\n\t\t\t{\n\t\t\t\tthrow new ArgumentNullException(\"input\");\n\t\t\t}\n\n\t\t\tif (input.Length == 0)\n\t\t\t{\n\t\t\t\treturn 0;\n\t\t\t}\n\n\t\t\tif (input.Length == 1)\n\t\t\t{\n\t\t\t\treturn input[0];\n\t\t\t}\n\n\t\t\tInt64 maxSliceSum = 0;\n\n\t\t\tInt64 currentSum = 0;\n\n\t\t\tNullable<Int32> maxNegative = null;\n\n\t\t\tBoolean allNegative = true;\n\n\t\t\tfor (Int32 q = 0; q < input.Length; q++)\n\t\t\t{\n\t\t\t\tInt32 currentItem = input[q];\n\n\t\t\t\tif (currentItem >= 0)\n\t\t\t\t{\n\t\t\t\t\tallNegative = false;\n\n\t\t\t\t\tcurrentSum += currentItem;\n\t\t\t\t}\n\t\t\t\telse\n\t\t\t\t{\n\t\t\t\t\tif (maxNegative.HasValue)\n\t\t\t\t\t{\n\t\t\t\t\t\tif (currentItem > maxNegative.Value)\n\t\t\t\t\t\t{\n\t\t\t\t\t\t\tmaxNegative = currentItem;\n\t\t\t\t\t\t}\n\t\t\t\t\t}\n\t\t\t\t\telse\n\t\t\t\t\t{\n\t\t\t\t\t\tmaxNegative = currentItem;\n\t\t\t\t\t}\n\n\t\t\t\t\tif (currentSum + currentItem > 0)\n\t\t\t\t\t{\n\t\t\t\t\t\tcurrentSum += currentItem;\n\t\t\t\t\t}\n\t\t\t\t\telse\n\t\t\t\t\t{\n\t\t\t\t\t\tcurrentSum = 0;\n\t\t\t\t\t}\n\t\t\t\t}\n\n\t\t\t\tif (currentSum > maxSliceSum)\n\t\t\t\t{\n\t\t\t\t\tmaxSliceSum = currentSum;\n\t\t\t\t}\n\t\t\t}\n\n\t\t\tif (allNegative)\n\t\t\t{\n\t\t\t\treturn maxNegative.Value;\n\t\t\t}\n\n\t\t\treturn (Int32)maxSliceSum;\n\t\t}\n\t}\n}\n"
},
{
"alpha_fraction": 0.5252225399017334,
"alphanum_fraction": 0.5415430068969727,
"avg_line_length": 23.66666603088379,
"blob_id": "19e74c514265e565f3bddf592c5b392ae27c57e3",
"content_id": "046a8e659f5793b0b06b72397c6adda1edbda283",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 674,
"license_type": "no_license",
"max_line_length": 84,
"num_lines": 27,
"path": "/Python/CodilityTieRopes.py",
"repo_name": "istiophorus/Codility",
"src_encoding": "UTF-8",
"text": "# https://app.codility.com/programmers/lessons/16-greedy_algorithms/tie_ropes/start/\n# https://app.codility.com/demo/results/trainingVD4GEU-NET/\n\ndef solution(minLen, ropes):\n if (None == ropes):\n raise ValueError(\"ropes array is None\")\n\n if len(ropes) <= 0:\n raise ValueError(\"ropes array is empty\")\n \n if minLen <= 0:\n raise ValueError()\n \n currLen = 0\n count = 0\n \n for r in ropes:\n currLen = currLen + r\n if currLen >= minLen:\n count = count + 1\n currLen = 0\n \n if currLen >= minLen:\n count = count + 1\n currLen = 0\n \n return count\n "
},
{
"alpha_fraction": 0.5961538553237915,
"alphanum_fraction": 0.6085972785949707,
"avg_line_length": 26.65625,
"blob_id": "4826997f0e0b7afd8fff02080f2f25f134b3c6d0",
"content_id": "0b72f0107c7780c5ebaf1585ec81656e936e611e",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 884,
"license_type": "no_license",
"max_line_length": 66,
"num_lines": 32,
"path": "/Python/MinAvgTwoSlice.py",
"repo_name": "istiophorus/Codility",
"src_encoding": "UTF-8",
"text": "import sys\n\ndef MinAvgTwoSlice(arr):\n minSliceIndex = -1\n minSliceAvg = sys.maxsize\n currentSliceSum = 0\n currentSliceStart = 0\n currentSliceLength = 0\n currentSliceEnd = 0\n arrLen = len(arr)\n\n while currentSliceStart < arrLen:\n if currentSliceLength < 3:\n if currentSliceEnd >= arrLen:\n break\n\n currentSliceSum += arr[currentSliceEnd]\n currentSliceLength += 1\n currentSliceEnd += 1\n else:\n currentSliceSum -= arr[currentSliceStart]\n currentSliceStart += 1\n currentSliceLength -= 1\n\n if currentSliceLength >= 2:\n currentSliceAvg = currentSliceSum / currentSliceLength\n\n if currentSliceAvg < minSliceAvg:\n minSliceAvg = currentSliceAvg\n minSliceIndex = currentSliceStart\n\n return minSliceIndex"
},
{
"alpha_fraction": 0.6064285635948181,
"alphanum_fraction": 0.6307142972946167,
"avg_line_length": 19.28985595703125,
"blob_id": "2ba1b6ae62e8f521e5e0d1e5e3ebed56b6753c92",
"content_id": "e5778ada82324dad73038ba35e1ba82e9776e1ba",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "C#",
"length_bytes": 1402,
"license_type": "no_license",
"max_line_length": 103,
"num_lines": 69,
"path": "/Codility.Training/NumberSolitair.cs",
"repo_name": "istiophorus/Codility",
"src_encoding": "UTF-8",
"text": "using System;\nusing System.Collections.Generic;\nusing System.Linq;\nusing System.Text;\nusing System.Threading.Tasks;\n\nnamespace Codility.Training\n{\n\t/// <summary>\n\t/// my soution of the Codility problem:\n\t/// https://codility.com/demo/take-sample-test/number_solitaire\n\t/// with results\n\t/// https://codility.com/demo/results/demoWVAR9N-333/\n\t/// </summary>\n\n\tpublic sealed class NumberSolitair\n\t{\n\t\tpublic Int32 Solve(Int32[] input)\n\t\t{\n\t\t\tif (null == input)\n\t\t\t{\n\t\t\t\tthrow new ArgumentNullException(\"input\");\n\t\t\t}\n\n\t\t\tif (input.Length <= 0)\n\t\t\t{\n\t\t\t\tthrow new ArgumentException();\n\t\t\t}\n\n\t\t\tNullable<Int32>[] maxValues = new Nullable<Int32>[input.Length];\n\n\t\t\tfor (Int32 q = 0; q < input.Length; q++)\n\t\t\t{\n\t\t\t\tNullable<Int32> previousMax = GetMaxValue(maxValues, q);\n\n\t\t\t\tif (previousMax.HasValue)\n\t\t\t\t{\n\t\t\t\t\tmaxValues[q] = previousMax.Value + input[q];\n\t\t\t\t}\n\t\t\t\telse\n\t\t\t\t{\n\t\t\t\t\tmaxValues[q] = input[q];\n\t\t\t\t}\n\t\t\t}\n\n\t\t\treturn maxValues[input.Length - 1].Value;\n\t\t}\n\n\t\tprivate static Nullable<Int32> GetMaxValue(Nullable<Int32>[] maxValues, Int32 index)\n\t\t{\n\t\t\tNullable<Int32> currentMax = null;\n\n\t\t\tfor (Int32 q = 1; q <= 6; q++)\n\t\t\t{\n\t\t\t\tInt32 backItem = index - q;\n\n\t\t\t\tif (backItem >= 0)\n\t\t\t\t{\n\t\t\t\t\tif (!currentMax.HasValue || (currentMax.HasValue && maxValues[backItem].Value > currentMax.Value))\n\t\t\t\t\t{\n\t\t\t\t\t\tcurrentMax = maxValues[backItem];\n\t\t\t\t\t}\n\t\t\t\t}\n\t\t\t}\n\n\t\t\treturn currentMax;\n\t\t}\n\t}\n}\n"
},
{
"alpha_fraction": 0.5509076714515686,
"alphanum_fraction": 0.5666929483413696,
"avg_line_length": 16.59722137451172,
"blob_id": "e68c31e8187e4523e008c4c6b52e92485f01c2ac",
"content_id": "29c5afc07720b49fea283349f94007276fa14156",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "C#",
"length_bytes": 1269,
"license_type": "no_license",
"max_line_length": 68,
"num_lines": 72,
"path": "/Codility.Training/CountDiv.cs",
"repo_name": "istiophorus/Codility",
"src_encoding": "UTF-8",
"text": "using System;\nusing System.Collections.Generic;\nusing System.Linq;\nusing System.Text;\nusing System.Threading.Tasks;\n\nnamespace Codility.Training\n{\n\tpublic sealed class CountDiv\n\t{\n\t\t/// <summary>\n\t\t/// my solution of the codility task:\n\t\t/// https://codility.com/demo/take-sample-test/count_div\n\t\t/// with results\n\t\t/// https://codility.com/demo/results/demo2ZCSZK-UTQ/\n\t\t/// </summary>\n\t\t/// <param name=\"a\"></param>\n\t\t/// <param name=\"b\"></param>\n\t\t/// <param name=\"k\"></param>\n\t\t/// <returns></returns>\n\t\tpublic Int32 Solve(Int32 a, Int32 b, Int32 k)\n\t\t{\n\t\t\tif (a < 0)\n\t\t\t{\n\t\t\t\tthrow new ArgumentOutOfRangeException(\"a\");\n\t\t\t}\n\n\t\t\tif (b < 0)\n\t\t\t{\n\t\t\t\tthrow new ArgumentOutOfRangeException(\"b\");\n\t\t\t}\n\n\t\t\tif (k < 1)\n\t\t\t{\n\t\t\t\tthrow new ArgumentOutOfRangeException(\"k\");\n\t\t\t}\n\n\t\t\tif (a > b)\n\t\t\t{\n\t\t\t\tthrow new ArgumentOutOfRangeException(\"a\");\n\t\t\t}\n\n\t\t\tInt32 rangeLength = b - a + 1;\n\n\t\t\tif (k == 1)\n\t\t\t{\n\t\t\t\treturn rangeLength;\n\t\t\t}\n\n\t\t\tif (rangeLength % k == 0) /// if range length is a multiple of k \n\t\t\t{\n\t\t\t\treturn rangeLength / k;\n\t\t\t}\n\t\t\telse\n\t\t\t{\n\t\t\t\tif (a % k == 0)\n\t\t\t\t{\n\t\t\t\t\treturn rangeLength / k + 1;\n\t\t\t\t}\n\n\t\t\t\tif (a % k + rangeLength % k> k)\n\t\t\t\t{\n\t\t\t\t\treturn rangeLength / k + 1;\n\t\t\t\t}\n\t\t\t\telse\n\t\t\t\t{\n\t\t\t\t\treturn rangeLength / k;\n\t\t\t\t}\n\t\t\t}\n\t\t}\n\t}\n}\n"
},
{
"alpha_fraction": 0.4921259880065918,
"alphanum_fraction": 0.5236220359802246,
"avg_line_length": 18.538461685180664,
"blob_id": "724e70dc1f5accea12ef42bd6f1135e6b3d5c631",
"content_id": "7cef9ab6040eedc6dced2169846e95da222afafd",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 254,
"license_type": "no_license",
"max_line_length": 58,
"num_lines": 13,
"path": "/Python/PermMissingElem.py",
"repo_name": "istiophorus/Codility",
"src_encoding": "UTF-8",
"text": "\n#https://app.codility.com/demo/results/trainingQ37ACM-3XF/\ndef perm_missing_elem(arr):\n if arr == []:\n return 1\n\n al = len(arr)\n sum = 0\n sum_all = (al + 1) * (al + 2) // 2\n\n for x in arr:\n sum += x\n\n return sum_all - sum"
},
{
"alpha_fraction": 0.4775967299938202,
"alphanum_fraction": 0.5071282982826233,
"avg_line_length": 19.67368507385254,
"blob_id": "99c2a546e6167ea88579e860d017d2a552046cd3",
"content_id": "04f89eaf9dcc61b8b2287c2697ed455f5d39874d",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 1964,
"license_type": "no_license",
"max_line_length": 92,
"num_lines": 95,
"path": "/Python/CommonPrimeDivisors92.py",
"repo_name": "istiophorus/Codility",
"src_encoding": "UTF-8",
"text": "\n#\n# https://app.codility.com/programmers/lessons/12-euclidean_algorithm/common_prime_divisors/\n# https://app.codility.com/demo/results/training7YTF68-B9E/\n#\n\ndef LargestCommonDivisor(a, b):\n if a == 0:\n raise ValueError()\n \n if b == 0:\n raise ValueError()\n \n if (a % b) == 0:\n return b\n\n if (b % a) == 0:\n return a\n \n if a >= b:\n return LargestCommonDivisor(b, a % b)\n else:\n return LargestCommonDivisor(a, b % a)\n \ndef UniquePrimeDivisors(x):\n result = set()\n divisor = 2\n while divisor < x:\n if x % divisor == 0:\n result.add(divisor)\n x = x / divisor\n else:\n divisor += 1\n result.add(int(x))\n return result\n \ndef HaveAllCommonPrimeDivisorsExt2(x1,x2):\n if x1 == x2:\n return True\n\n lcd = LargestCommonDivisor(x1, x2)\n \n if lcd == 1 and not (x1 == 1 and x2 == 1):\n return False\n \n d1 = x1 // lcd\n d2 = x2 // lcd\n \n print(d1, d2)\n \n x1ok = False\n x2ok = False\n\n if lcd % d1 == 0:\n x1ok = True\n \n if lcd % d2 == 0:\n x2ok = True\n \n if x1ok and x2ok:\n return True\n \n lcdprime = UniquePrimeDivisors(lcd)\n \n if not x1ok:\n d1prime = UniquePrimeDivisors(d1)\n x1ok = lcdprime.intersection(d1prime) == d1prime \n \n if not x2ok:\n d2prime = UniquePrimeDivisors(d2)\n x2ok = lcdprime.intersection(d2prime) == d2prime\n \n return x1ok and x2ok \n \ndef solution(a, b):\n if a == None:\n raise ValueError()\n \n if b == None:\n raise ValueError()\n \n if len(a) != len(b):\n raise ValueError()\n \n counter = 0\n \n for xa, xb in zip(a, b):\n print(xa)\n print(xb)\n if HaveAllCommonPrimeDivisorsExt2(xa, xb):\n print(True)\n counter += 1\n\n print(\"###\")\n \n return counter"
},
{
"alpha_fraction": 0.4979591965675354,
"alphanum_fraction": 0.5326530337333679,
"avg_line_length": 22.878047943115234,
"blob_id": "048fa4aa755391c2180bd147ab007e69b813387d",
"content_id": "be2b263b9c927914da41ec557ab4c34a02a1d7fc",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 980,
"license_type": "no_license",
"max_line_length": 87,
"num_lines": 41,
"path": "/Python/NumberSolitaire.py",
"repo_name": "istiophorus/Codility",
"src_encoding": "UTF-8",
"text": "\n\n# https://app.codility.com/programmers/lessons/17-dynamic_programming/number_solitaire/\n# https://app.codility.com/demo/results/training8D4P2J-N9F/\n\nimport sys\n\ndef NumberSolitaire(arr):\n if arr == None:\n raise ValueError\n \n arrL = len(arr)\n \n if arrL < 1:\n raise ValueError\n \n if arrL == 1:\n return arr[0]\n \n currentSum = arr[0]\n \n result = []\n result.append(arr[0])\n \n for ix,v in enumerate(arr[1:]):\n baseIndex = ix + 1\n \n innerMax = -sys.maxsize\n resultL = len(result)\n for ix in list(range(1,7)):\n index = baseIndex - ix\n if index >= 0 and index < resultL:\n currentMax = result[index] + v\n if currentMax > innerMax:\n innerMax = currentMax\n \n result.append(innerMax)\n \n return result[-1]\n\nNumberSolitaire([1, -2, 0, 9, -1, -2])\n\nNumberSolitaire([1,0,0,0,0,1,0,0,0,0,1])"
},
{
"alpha_fraction": 0.47893568873405457,
"alphanum_fraction": 0.49889135360717773,
"avg_line_length": 20.5238094329834,
"blob_id": "46810c8b0ec99b4300949738d482a268b3274b4a",
"content_id": "351604333dc7d83ee8bb51cc29066110bf0a90c1",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 451,
"license_type": "no_license",
"max_line_length": 69,
"num_lines": 21,
"path": "/Python/PermCheck.py",
"repo_name": "istiophorus/Codility",
"src_encoding": "UTF-8",
"text": "#https://app.codility.com/demo/results/trainingW7RWX8-HEG/\nimport sys\n\ndef perm_check(arr):\n m = {}\n minValue = sys.maxsize\n maxValue = -1\n for x in arr:\n if x < minValue:\n minValue = x\n \n if x > maxValue:\n maxValue = x\n \n m[x] = True\n if minValue == 1 and maxValue == len(arr) and len(m) == len(arr):\n return 1\n else:\n return 0\n \n#perm_check([1,3,2])"
},
{
"alpha_fraction": 0.6403872966766357,
"alphanum_fraction": 0.6666666865348816,
"avg_line_length": 19.08333396911621,
"blob_id": "b97ba5e8ba1512a9ab6e56fb39771e5a9bcf3fa5",
"content_id": "6b49840b6bf01da505d614ced5ed764449c560ec",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "C#",
"length_bytes": 725,
"license_type": "no_license",
"max_line_length": 67,
"num_lines": 36,
"path": "/Codility.Training.Tests/TapeEquilibriumTests.cs",
"repo_name": "istiophorus/Codility",
"src_encoding": "UTF-8",
"text": "using System;\nusing System.Collections.Generic;\nusing System.Linq;\nusing System.Text;\nusing System.Threading.Tasks;\nusing Microsoft.VisualStudio.TestTools.UnitTesting;\n\nnamespace Codility.Training.Tests\n{\n\t[TestClass]\n public class TapeEquilibriumTests\n {\n\t\tprivate readonly TapeEquilibrium _target = new TapeEquilibrium();\n\n\t\t[TestMethod]\n\t\tpublic void TestTaskExample()\n\t\t{\n\t\t\tAssert.AreEqual(1, _target.Solve(new Int32[] { 3, 1, 2, 4, 3}));\n\t\t}\n\n\t\t[TestMethod]\n\t\tpublic void TestEmpty()\n\t\t{\n\t\t\tAssert.AreEqual(0, _target.Solve(new Int32[] { }));\n\t\t}\n\n\t\t[TestMethod]\n\t\tpublic void TestSingle()\n\t\t{\n\t\t\tfor (Int32 q = -10; q < 10; q++)\n\t\t\t{\n\t\t\t\tAssert.AreEqual(q, _target.Solve(new Int32[] { q }));\n\t\t\t}\n\t\t}\n }\n}\n"
},
{
"alpha_fraction": 0.511904776096344,
"alphanum_fraction": 0.538165271282196,
"avg_line_length": 16.962265014648438,
"blob_id": "b8945f3375c0cc48c5c81b170ebe7fe3ecb3bad8",
"content_id": "26de48998685c63bd5242bfbe307f28d4c95c750",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "C#",
"length_bytes": 2858,
"license_type": "no_license",
"max_line_length": 89,
"num_lines": 159,
"path": "/Codility.Training/GenomicRangeQuery.cs",
"repo_name": "istiophorus/Codility",
"src_encoding": "UTF-8",
"text": "using System;\nusing System.Collections.Generic;\nusing System.Linq;\nusing System.Text;\nusing System.Threading.Tasks;\n\nnamespace Codility.Training\n{\n\t/// <summary>\n\t/// my solution of the task GenomicRangeQuery\n\t/// https://codility.com/demo/take-sample-test/genomic_range_query\n\t/// and here are results\n\t/// https://codility.com/demo/results/demoPSXTCR-WK2/\n\t/// </summary>\n\tpublic sealed class GenomicRangeQuery\n\t{\n\t\tprivate static Int32[] EmptyResult = new Int32[0];\n\n\t\tpublic Int32[] Solve(String input, Int32[] p, Int32[] w)\n\t\t{\n\t\t\tif (null == input)\n\t\t\t{\n\t\t\t\tthrow new ArgumentNullException(\"input\");\n\t\t\t}\n\n\t\t\tif (String.IsNullOrWhiteSpace(input))\n\t\t\t{\n\t\t\t\tthrow new ArgumentException(\"input\");\n\t\t\t}\n\n\t\t\tif (null == p && null == w)\n\t\t\t{\n\t\t\t\treturn EmptyResult;\n\t\t\t}\n\n\t\t\tif (p.Length != w.Length)\n\t\t\t{\n\t\t\t\tthrow new ArgumentOutOfRangeException(\"w\");\n\t\t\t}\n\n\t\t\tif (p.Length == 0 && w.Length == 0)\n\t\t\t{\n\t\t\t\treturn EmptyResult;\n\t\t\t}\n\n\t\t\tChar[] chars = input.ToCharArray();\n\n\t\t\t//////////////////////////////////////////////////////////////////////////////////////\n\t\t\t//// prepare summaries\n\t\t\t////\n\n\t\t\tInt32[,] summaries = new Int32[chars.Length, 4];\n\n\t\t\tfor (Int32 q = 0; q < chars.Length; q++)\n\t\t\t{\n\t\t\t\tChar current = chars[q];\n\n\t\t\t\tif (q > 0)\n\t\t\t\t{\n\t\t\t\t\tsummaries[q, 0] = summaries[q - 1, 0];\n\t\t\t\t\tsummaries[q, 1] = summaries[q - 1, 1];\n\t\t\t\t\tsummaries[q, 2] = summaries[q - 1, 2];\n\t\t\t\t\tsummaries[q, 3] = summaries[q - 1, 3];\n\t\t\t\t}\n\n\t\t\t\tsummaries[q, GetIndex(current)]++;\n\t\t\t}\n\n\t\t\tInt32[] result = new Int32[p.Length];\n\n\t\t\tInt32[] currentDiff = new Int32[4];\n\n\t\t\tfor (Int32 q = 0; q < result.Length; q++)\n\t\t\t{\n\t\t\t\tInt32 indexP = p[q];\n\n\t\t\t\tInt32 indexW = w[q];\n\n\t\t\t\tif (indexP < 0 || indexP >= chars.Length)\n\t\t\t\t{\n\t\t\t\t\tthrow new ArgumentOutOfRangeException(\"p \" + q);\n\t\t\t\t}\n\n\t\t\t\tif (indexW < 0 || indexW >= chars.Length)\n\t\t\t\t{\n\t\t\t\t\tthrow new ArgumentOutOfRangeException(\"w \" + q);\n\t\t\t\t}\n\n\t\t\t\tfor (Int32 ix = 0; ix < 4; ix++)\n\t\t\t\t{\n\t\t\t\t\tcurrentDiff[ix] = summaries[indexW, ix] - summaries[indexP, ix];\n\t\t\t\t}\n\n\t\t\t\tcurrentDiff[GetIndex(chars[indexP])]++;\n\n\t\t\t\tfor (Int32 ix = 0; ix < 4; ix++)\n\t\t\t\t{\n\t\t\t\t\tif (currentDiff[ix] > 0)\n\t\t\t\t\t{\n\t\t\t\t\t\tresult[q] = ix + 1;\n\t\t\t\t\t\tbreak;\n\t\t\t\t\t}\n\t\t\t\t}\n\t\t\t}\n\n\t\t\treturn result;\n\t\t}\n\n\t\tprivate static Int32 GetIndex(Char ch)\n\t\t{\n\t\t\tswitch (ch)\n\t\t\t{\n\t\t\t\tcase 'a':\n\t\t\t\tcase 'A':\n\t\t\t\t\treturn 0;\n\n\t\t\t\tcase 'c':\n\t\t\t\tcase 'C':\n\t\t\t\t\treturn 1;\n\n\t\t\t\tcase 'g':\n\t\t\t\tcase 'G':\n\t\t\t\t\treturn 2;\n\n\t\t\t\tcase 't':\n\t\t\t\tcase 'T':\n\t\t\t\t\treturn 3;\n\n\t\t\t\tdefault:\n\t\t\t\t\tthrow new ArgumentOutOfRangeException(\"ch\");\n\t\t\t}\n\t\t}\n\n\t\tprivate static Int32 GetImpactFactor(Char ch)\n\t\t{\n\t\t\tswitch (ch)\n\t\t\t{\n\t\t\t\tcase 'a':\n\t\t\t\tcase 'A':\n\t\t\t\t\treturn 1;\n\n\t\t\t\tcase 'c':\n\t\t\t\tcase 'C':\n\t\t\t\t\treturn 2;\n\n\t\t\t\tcase 'g':\n\t\t\t\tcase 'G':\n\t\t\t\t\treturn 3;\n\n\t\t\t\tcase 't':\n\t\t\t\tcase 'T':\n\t\t\t\t\treturn 4;\n\n\t\t\t\tdefault:\n\t\t\t\t\tthrow new ArgumentOutOfRangeException(\"ch\");\n\t\t\t}\n\t\t}\n\t}\n}\n"
},
{
"alpha_fraction": 0.49046793580055237,
"alphanum_fraction": 0.5129982829093933,
"avg_line_length": 25.904762268066406,
"blob_id": "f170ca4da508263b6665896473f113a9307090ea",
"content_id": "f45381143d8142f9bbf95ec87d794b6b8d9831fe",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 577,
"license_type": "no_license",
"max_line_length": 78,
"num_lines": 21,
"path": "/Python/Nesting.py",
"repo_name": "istiophorus/Codility",
"src_encoding": "UTF-8",
"text": "# Nesting task from Codility\n# https://app.codility.com/programmers/lessons/7-stacks_and_queues/nesting/\n# https://app.codility.com/demo/results/trainingX2CG68-Z6K/\n\ndef Nesting(input):\n counter = 0\n for ch in input:\n if ch == '(':\n counter += 1\n elif ch == ')':\n if counter > 0:\n counter -= 1\n else:\n return 0\n else:\n raise ValueError(\"Input string contains invalid character: \" + ch)\n \n if counter == 0:\n return 1\n else:\n return 0\n "
},
{
"alpha_fraction": 0.5805084705352783,
"alphanum_fraction": 0.6084745526313782,
"avg_line_length": 18.032258987426758,
"blob_id": "9b92e66a844d5f15313beb0d181f6936cff0abd0",
"content_id": "f6b50deed23d0a5fb25160b744b41c29eecfeba6",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "C#",
"length_bytes": 1182,
"license_type": "no_license",
"max_line_length": 74,
"num_lines": 62,
"path": "/Codility.Training/PassingCars.cs",
"repo_name": "istiophorus/Codility",
"src_encoding": "UTF-8",
"text": "using System;\nusing System.Collections.Generic;\nusing System.Linq;\nusing System.Text;\nusing System.Threading.Tasks;\n\nnamespace Codility.Training\n{\n\t/// <summary>\n\t/// my solution of the task MaxSliceSum\n\t/// https://codility.com/demo/take-sample-test/passing_cars\n\t/// and here are results\n\t/// https://codility.com/demo/results/demoR6FDN6-7BZ/\n\t/// </summary>\n\tpublic sealed class PassingCars\n\t{\n\t\tpublic Int32 Solve(Int32[] input)\n\t\t{\n\t\t\tif (null == input)\n\t\t\t{\n\t\t\t\tthrow new ArgumentNullException(\"input\");\n\t\t\t}\n\n\t\t\tif (input.Length <= 0)\n\t\t\t{\n\t\t\t\tthrow new ArgumentException(\"input is empty\");\n\t\t\t}\n\n\t\t\tInt32 passingCars = 0;\n\n\t\t\tInt32 currentEasters = 0;\n\n\t\t\tfor (Int32 q = 0; q < input.Length; q++)\n\t\t\t{\n\t\t\t\tInt32 currentCar = input[q];\n\n\t\t\t\tif (currentCar >= 0 && currentCar <= 1)\n\t\t\t\t{\n\t\t\t\t\tif (currentCar == 0)\n\t\t\t\t\t{\n\t\t\t\t\t\tcurrentEasters++;\n\t\t\t\t\t}\n\t\t\t\t\telse //// wester - he should pass all counted easters\n\t\t\t\t\t{\n\t\t\t\t\t\tpassingCars += currentEasters;\n\n\t\t\t\t\t\tif (passingCars > 1000000000)\n\t\t\t\t\t\t{\n\t\t\t\t\t\t\treturn -1;\n\t\t\t\t\t\t}\n\t\t\t\t\t}\n\t\t\t\t}\n\t\t\t\telse\n\t\t\t\t{\n\t\t\t\t\tthrow new ApplicationException(\"Buldozer on the read \" + currentCar);\n\t\t\t\t}\n\t\t\t}\n\n\t\t\treturn passingCars;\n\t\t}\n\t}\n}\n"
},
{
"alpha_fraction": 0.49594375491142273,
"alphanum_fraction": 0.5062195658683777,
"avg_line_length": 22.602563858032227,
"blob_id": "af3a99bb0ea3047fcffe41e46265462c6b0a4d3a",
"content_id": "1dfd3afa4a7fa68b913c8e8c60a2e070fbf2e75b",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 1849,
"license_type": "no_license",
"max_line_length": 85,
"num_lines": 78,
"path": "/Python/MaxSliceSum.py",
"repo_name": "istiophorus/Codility",
"src_encoding": "UTF-8",
"text": "# MaxSliceSum\n# https://app.codility.com/programmers/lessons/9-maximum_slice_problem/max_slice_sum/\n# https://app.codility.com/demo/results/trainingEVDBT5-B3H/\n# it is marked as Painless but it took me over 2 hours to solve it\n\nimport sys\n\ndef is_negative(x):\n return x < 0\n\ndef MaxSliceSum(arr):\n if None == arr:\n raise ValueError\n \n arrLen = len(arr)\n \n if arrLen <= 0:\n return 0\n \n if arrLen == 1:\n return arr[0]\n \n allNegative = True\n maxNumber = -sys.maxsize\n\n currentBlob = 0\n first = True\n \n mergedRanges = []\n \n for x in arr:\n if x > maxNumber:\n maxNumber = x\n \n if x >= 0:\n allNegative = False\n \n if first:\n currentBlob = x\n first = False\n else:\n if is_negative(x) == is_negative(currentBlob):\n currentBlob += x\n else:\n mergedRanges.append(currentBlob)\n currentBlob = x\n \n mergedRanges.append(currentBlob)\n\n if allNegative:\n return maxNumber\n\n currentMerged = 0\n \n mergedRangesLen = len(mergedRanges)\n maxSliceSum = 0\n first = True\n \n for (ix, v) in enumerate(mergedRanges):\n if v >= 0:\n currentMerged += v\n else:\n if not first:\n if ix < mergedRangesLen - 1:\n if (mergedRanges[ix + 1] + v >= 0) and (currentMerged + v >= 0):\n currentMerged += v\n else:\n currentMerged = 0\n \n if currentMerged > maxSliceSum:\n maxSliceSum = currentMerged\n \n first = False\n \n if currentMerged > maxSliceSum:\n maxSliceSum = currentMerged\n \n return maxSliceSum \n "
},
{
"alpha_fraction": 0.5175718665122986,
"alphanum_fraction": 0.5335463285446167,
"avg_line_length": 30.200000762939453,
"blob_id": "dc7329ccb06ecb5ec31b55ccedd59ac894245905",
"content_id": "648b1f563bce6d70bda8173ce14afc6f77eb9f30",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 313,
"license_type": "no_license",
"max_line_length": 71,
"num_lines": 10,
"path": "/Python/CountDiv.py",
"repo_name": "istiophorus/Codility",
"src_encoding": "UTF-8",
"text": "\n\n# CountDiv task\n# https://app.codility.com/programmers/lessons/5-prefix_sums/count_div/\n# https://app.codility.com/demo/results/trainingQTS6GE-AFX/\ndef solution(a, b, k):\n if (b % k) != 0:\n b = b - b % k\n if (a % k) != 0:\n a = a + k - (a % k) \n res = res = (b - a) // k + 1\n return res"
},
{
"alpha_fraction": 0.597254753112793,
"alphanum_fraction": 0.6253766417503357,
"avg_line_length": 18.270967483520508,
"blob_id": "03470948bf2bb689f47bed7a1b610c8fff2adfb4",
"content_id": "94db0e39cf840b04d8231ff7ef4173b7dcd49166",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "C#",
"length_bytes": 2989,
"license_type": "no_license",
"max_line_length": 82,
"num_lines": 155,
"path": "/Codility.Training/CountSemiprimes.cs",
"repo_name": "istiophorus/Codility",
"src_encoding": "UTF-8",
"text": "using System;\nusing System.Collections.Generic;\nusing System.Linq;\nusing System.Text;\nusing System.Threading.Tasks;\n\nnamespace Codility.Training\n{\n\t/// <summary>\n\t/// my solution of the task CountSemiprimes\n\t/// https://codility.com/demo/take-sample-test/count_semiprimes\n\t/// and here are results\n\t/// https://codility.com/demo/results/demoFGQSZ4-QTP/\n\t/// </summary>\n\tpublic sealed class CountSemiprimes\n\t{\n\t\tprivate class NumberInfo\n\t\t{\n\t\t\tinternal Int32 Value { get; set; }\n\n\t\t\tinternal Int32 CurrentValue { get; set; }\n\t\t}\n\n\t\tpublic Int32[] Solve(Int32 maxN, Int32[] rangeP, Int32[] rangeQ)\n\t\t{\n\t\t\tif (null == rangeP)\n\t\t\t{\n\t\t\t\tthrow new ArgumentNullException(\"rangeP\");\n\t\t\t}\n\n\t\t\tif (null == rangeQ)\n\t\t\t{\n\t\t\t\tthrow new ArgumentNullException(\"rangeQ\");\n\t\t\t}\n\n\t\t\tif (rangeP.Length != rangeQ.Length)\n\t\t\t{\n\t\t\t\tthrow new ArgumentOutOfRangeException(\"rangeQ\");\n\t\t\t}\n\n\t\t\tif (rangeP.Length == 0)\n\t\t\t{\n\t\t\t\treturn new Int32[0];\n\t\t\t}\n\n\t\t\tInt32[] result = new Int32[rangeP.Length];\n\n\t\t\tDictionary<Int32, NumberInfo> semiPrimesList = GetSemiPrimes(maxN);\n\n\t\t\tInt32[] subs = new Int32[maxN + 2];\n\n\t\t\tInt32 currentSemiprimes = 0;\n\n\t\t\tfor (Int32 q = 0; q < subs.Length; q++)\n\t\t\t{\n\t\t\t\tsubs[q] = currentSemiprimes;\n\n\t\t\t\tif (semiPrimesList.ContainsKey(q))\n\t\t\t\t{\n\t\t\t\t\tcurrentSemiprimes++;\n\t\t\t\t}\n\t\t\t}\n\n\t\t\tfor (Int32 ix = 0; ix < rangeP.Length; ix++)\n\t\t\t{\n\t\t\t\tInt32 p = rangeP[ix];\n\n\t\t\t\tInt32 q = rangeQ[ix];\n\n\t\t\t\tresult[ix] = subs[q + 1] - subs[p];\n\t\t\t}\n\n\t\t\treturn result;\n\t\t}\n\n\t\tprivate static Dictionary<Int32, NumberInfo> GetSemiPrimes(Int32 maxN)\n\t\t{\n\t\t\tDictionary<Int32, NumberInfo> primes = new Dictionary<Int32, NumberInfo>();\n\n\t\t\tDictionary<Int32, NumberInfo> semiPrimes = new Dictionary<Int32, NumberInfo>();\n\n\t\t\tList<Int32> numbersList = new List<Int32>();\n\n\t\t\tfor (Int32 q = 2; q <= maxN; q++)\n\t\t\t{\n\t\t\t\tprimes.Add(q, new NumberInfo\n\t\t\t\t{\n\t\t\t\t\tValue = q,\n\t\t\t\t\tCurrentValue = q\n\t\t\t\t});\n\n\t\t\t\tnumbersList.Add(q);\n\t\t\t}\n\n\t\t\tList<Int32> itemsToMove = new List<Int32>();\n\n\t\t\tInt32 limit = (Int32)Math.Ceiling(Math.Sqrt(maxN));\n\n\t\t\twhile (numbersList.Count > 0)\n\t\t\t{\n\t\t\t\tInt32 currentItem = numbersList[0];\n\n\t\t\t\tif (!primes.ContainsKey(currentItem))\n\t\t\t\t{\n\t\t\t\t\tnumbersList.RemoveAt(0);\n\n\t\t\t\t\tcontinue;\n\t\t\t\t}\n\n\t\t\t\tif (currentItem > limit)\n\t\t\t\t{\n\t\t\t\t\tbreak;\n\t\t\t\t}\n\n\t\t\t\titemsToMove.Clear();\n\n\t\t\t\tforeach (KeyValuePair<Int32, NumberInfo> pair in primes)\n\t\t\t\t{\n\t\t\t\t\tif (pair.Value.CurrentValue % currentItem == 0 && pair.Key != currentItem)\n\t\t\t\t\t{\n\t\t\t\t\t\tpair.Value.CurrentValue /= currentItem;\n\n\t\t\t\t\t\titemsToMove.Add(pair.Key);\n\t\t\t\t\t}\n\t\t\t\t}\n\n\t\t\t\tforeach (Int32 item in itemsToMove)\n\t\t\t\t{\n\t\t\t\t\tsemiPrimes.Add(item, primes[item]);\n\n\t\t\t\t\tprimes.Remove(item);\n\t\t\t\t}\n\n\t\t\t\tnumbersList.RemoveAt(0);\n\t\t\t}\n\n\t\t\titemsToMove.Clear();\n\n\t\t\tforeach (KeyValuePair<Int32, NumberInfo> pair in semiPrimes)\n\t\t\t{\n\t\t\t\tif (!primes.ContainsKey(pair.Value.CurrentValue))\n\t\t\t\t{\n\t\t\t\t\titemsToMove.Add(pair.Key);\n\t\t\t\t}\n\t\t\t}\n\n\t\t\tforeach (Int32 item in itemsToMove)\n\t\t\t{\n\t\t\t\tsemiPrimes.Remove(item);\n\t\t\t}\n\n\t\t\treturn semiPrimes;\n\t\t}\n\t}\n}\n"
},
{
"alpha_fraction": 0.5632184147834778,
"alphanum_fraction": 0.6130267977714539,
"avg_line_length": 19.153846740722656,
"blob_id": "419bb6ecba45af4ae76e7c3714ebe5a37c1def77",
"content_id": "b523cf1d07a550250610d2b088e7fb28bffd0187",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 261,
"license_type": "no_license",
"max_line_length": 58,
"num_lines": 13,
"path": "/Python/OddOccurrencesInArray.py",
"repo_name": "istiophorus/Codility",
"src_encoding": "UTF-8",
"text": "#https://app.codility.com/demo/results/trainingS88NEB-82D/\ndef odd_occurrences_in_array(arr):\n if arr == []:\n return 0\n\n result = 0\n\n for x in arr:\n result = result ^ x\n\n return result\n\n#print(odd_occurrences_in_array([9,3,9,3,9,7,9]))"
},
{
"alpha_fraction": 0.5471777319908142,
"alphanum_fraction": 0.5758213996887207,
"avg_line_length": 30.639999389648438,
"blob_id": "3277ea7711a7b61843238e5d3328a0c557d2fe95",
"content_id": "fb49d0af2269bdec83f76fdfbbf857d909530467",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 2374,
"license_type": "no_license",
"max_line_length": 102,
"num_lines": 75,
"path": "/Python/FlagsAndPeaks.py",
"repo_name": "istiophorus/Codility",
"src_encoding": "UTF-8",
"text": "\n# https://app.codility.com/programmers/lessons/10-prime_and_composite_numbers/flags/\n# https://app.codility.com/demo/results/trainingR7DR65-4KT/\n# https://app.codility.com/demo/results/trainingR7MBGU-ZPE/\n# correctness 100%\n# performance 28%\n# task score 66%\n\ndef find_peaks_distances(arrin):\n if arrin is None:\n raise ValueError\n \n arr = [1000000001] + arrin + [1000000001]\n \n previous_level = 1000000001\n was_greater = False\n previous_peak = -1\n min_distance = 1000000\n max_distance = 0\n \n peaks = []\n distances = []\n for ix, x in enumerate(arr):\n if (x < previous_level) and was_greater:\n last_peak = ix - 1 - 1 \n peaks.append(last_peak)\n \n if previous_peak >= 0:\n peak_distance = last_peak - previous_peak\n distances.append(peak_distance)\n \n if peak_distance < min_distance:\n min_distance = peak_distance\n \n if peak_distance > max_distance:\n max_distance = peak_distance\n \n previous_peak = last_peak\n \n was_greater = x > previous_level\n previous_level = x\n\n return (peaks, distances, min_distance, max_distance)\n\ndef solution(arr):\n peaks, distances, min_distance, max_distance = find_peaks_distances(arr)\n \n if len(peaks) <= 0:\n return 0\n \n max_flags = len(peaks)\n min_flags = 1\n \n if max_flags > min_distance: # can not use max_flags due to minimum distance condition requirement\n max_flags = max_flags - 1\n \n maxAchievedFlagsCount = 0\n \n for currentFlagMinDist in range(max_flags, min_flags - 1, -1):\n if currentFlagMinDist < maxAchievedFlagsCount:\n return maxAchievedFlagsCount\n \n allFlagsCount = currentFlagMinDist\n flagsCount = 1\n cumulatedDist = 0\n for peakDist in distances:\n cumulatedDist = cumulatedDist + peakDist\n if cumulatedDist >= currentFlagMinDist:\n flagsCount = flagsCount + 1\n if flagsCount > maxAchievedFlagsCount:\n maxAchievedFlagsCount = flagsCount\n if flagsCount >= allFlagsCount:\n return allFlagsCount\n cumulatedDist = 0\n\n return 1\n"
},
{
"alpha_fraction": 0.5263774991035461,
"alphanum_fraction": 0.5404455065727234,
"avg_line_length": 22.72222137451172,
"blob_id": "d45171c8ae5a21fdc36cc4e32a1983865e876d75",
"content_id": "af844925d397d981ea2ce1db381b1c8a79c58f89",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 853,
"license_type": "no_license",
"max_line_length": 67,
"num_lines": 36,
"path": "/Python/Dominator.py",
"repo_name": "istiophorus/Codility",
"src_encoding": "UTF-8",
"text": "# Task: https://app.codility.com/c/run/trainingAWNVAU-D3U\n# Result: https://app.codility.com/demo/results/trainingAWNVAU-D3U/\n\nfrom collections import defaultdict\n\ndef solution(arr):\n if arr == None:\n raise ValueError\n \n arr_len = len(arr)\n \n if (arr_len <= 0):\n return -1\n \n if (arr_len == 1):\n return 0\n \n values = defaultdict(int)\n indexes = defaultdict(list)\n current_max = -1\n current_max_count = 0\n \n for ix, a in enumerate(arr):\n new_count = values[a] + 1\n values[a] = new_count\n indexes[a].append(ix)\n \n if (new_count > current_max_count):\n current_max = a\n current_max_count = new_count\n \n \n if (current_max_count > (arr_len / 2)):\n return indexes[current_max][0]\n \n return -1"
},
{
"alpha_fraction": 0.4586666524410248,
"alphanum_fraction": 0.46933332085609436,
"avg_line_length": 23.866666793823242,
"blob_id": "d38687748122fcc5535780ff09ab662416491787",
"content_id": "e5b2a9d6e88e925ff0660be0022f4962fa11027e",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 750,
"license_type": "no_license",
"max_line_length": 78,
"num_lines": 30,
"path": "/Python/StoneWall.py",
"repo_name": "istiophorus/Codility",
"src_encoding": "UTF-8",
"text": "# https://app.codility.com/programmers/lessons/7-stacks_and_queues/stone_wall/\n# https://app.codility.com/demo/results/trainingQBVARR-MTN/\n\ndef StoneWall(wall):\n if wall == None:\n raise valueError()\n \n if len(wall) <= 1:\n return len(wall)\n \n blocks = []\n blockCount = 0\n \n for ix,h in enumerate(wall):\n while len(blocks) > 0:\n last = blocks[-1]\n if (last > h):\n blocks.pop()\n elif last == h:\n break\n else:\n blocks.append(h)\n blockCount += 1\n break\n \n if len(blocks) <= 0:\n blockCount += 1\n blocks.append(h)\n \n return blockCount\n "
},
{
"alpha_fraction": 0.5315315127372742,
"alphanum_fraction": 0.6036036014556885,
"avg_line_length": 21.266666412353516,
"blob_id": "148c970305081d6c6f7fb5aabb302774c27d6a56",
"content_id": "acdc356cb2e91a307db54976a3e63e9f738e4744",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 333,
"license_type": "no_license",
"max_line_length": 58,
"num_lines": 15,
"path": "/Python/FrogJmp.py",
"repo_name": "istiophorus/Codility",
"src_encoding": "UTF-8",
"text": "import math\n\n#https://app.codility.com/demo/results/trainingHMVFK8-43N/\ndef frog_jmp(x, y, d):\n if x < 1 or y < x or d < 1:\n raise ValueError\n\n diff = y - x\n return math.ceil(diff / d)\n \n#print(frog_jmp(10,85,30))\n#print(frog_jmp(1,2,10))\n#print(frog_jmp(1,4,2))\n#print(frog_jmp(1,5,2))\n#print(frog_jmp(1,2,1))"
},
{
"alpha_fraction": 0.5382775068283081,
"alphanum_fraction": 0.5558213591575623,
"avg_line_length": 23.134614944458008,
"blob_id": "43d699b460181190b50a9949b1914dbaa4ceda0d",
"content_id": "ece5b14a889ec833e32e4c6f7a35aa77cc4df2d1",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 1254,
"license_type": "no_license",
"max_line_length": 90,
"num_lines": 52,
"path": "/Python/Peaks.py",
"repo_name": "istiophorus/Codility",
"src_encoding": "UTF-8",
"text": "# task: https://app.codility.com/programmers/lessons/10-prime_and_composite_numbers/peaks/\n# solution: https://app.codility.com/demo/results/training7JHSRM-NBT/\n\nimport math\nfrom itertools import islice\n\ndef chunk(it, size):\n it = iter(it)\n return iter(lambda: tuple(islice(it, size)), ())\n\ndef checkSlicing(sliceLen, flags):\n chunks = chunk(flags, sliceLen)\n return all(any(x == 1 for x in ch) for ch in chunks)\n\ndef solution(arr: list) -> int:\n if arr == None:\n raise valueError\n \n arrLen = len(arr)\n \n if arrLen <= 2:\n return 0\n \n flags = [0] * arrLen\n \n peaks = set()\n \n for ix in range(1, arrLen - 1):\n if (arr[ix] > arr[ix - 1]) and (arr[ix] > arr[ix + 1]):\n peaks.add(ix)\n flags[ix] = 1\n \n peaksCount = len(peaks)\n \n if peaksCount < 1:\n return 0\n \n if peaksCount == 1:\n return 1\n \n maxValue = math.floor(arrLen / 2)\n \n if maxValue < 2:\n return 1\n \n for sliceLen in range(2, maxValue + 1):\n blocks = arrLen / sliceLen\n if math.floor(blocks) == blocks:\n if checkSlicing(math.floor(sliceLen), flags):\n return math.floor(blocks)\n \n return 1"
},
{
"alpha_fraction": 0.4444444477558136,
"alphanum_fraction": 0.48148149251937866,
"avg_line_length": 18.33333396911621,
"blob_id": "ba8170f9d55284b13abf33277e36afd5dd3014d1",
"content_id": "f54b1ffdb82c6c50405c9bb5e52cd50b0c94bb10",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 405,
"license_type": "no_license",
"max_line_length": 58,
"num_lines": 21,
"path": "/Python/FrogRiverOne.py",
"repo_name": "istiophorus/Codility",
"src_encoding": "UTF-8",
"text": "#https://app.codility.com/demo/results/training3DS4KM-SWE/\ndef frog_river_one(x, arr):\n if x < 1:\n raise ValueError\n\n if arr == []:\n raise ValueError\n\n si = 0\n m = set([])\n\n for ix,v in enumerate(arr):\n if v not in m:\n m.add(v)\n si += 1\n if si >= x:\n return ix\n\n return -1\n\n#print(frog_river_one(5, [1,3,1,4,2,3,5,4]))"
},
{
"alpha_fraction": 0.5242494344711304,
"alphanum_fraction": 0.556581974029541,
"avg_line_length": 22.94444465637207,
"blob_id": "e05279bc55908e85e7121230082a4b33755e7d10",
"content_id": "b9b6e0215181e4b12dc03363a6656da10dbc8eda",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 433,
"license_type": "no_license",
"max_line_length": 78,
"num_lines": 18,
"path": "/Python/MaxProductOfThree.py",
"repo_name": "istiophorus/Codility",
"src_encoding": "UTF-8",
"text": "\n\n\n# https://app.codility.com/programmers/lessons/6-sorting/max_product_of_three/\n# https://app.codility.com/demo/results/trainingNCDRZJ-HQE/\n\ndef MaxProductOfThree(arr):\n if arr == None:\n raise ValueError\n \n if len(arr) < 3:\n raise ValueError\n \n arr.sort()\n r1 = arr[0] * arr[1] * arr[-1]\n r2 = arr[-1] * arr[-2] * arr[-3]\n \n if r1 > r2:\n return r1\n else:\n return r2"
},
{
"alpha_fraction": 0.5743145942687988,
"alphanum_fraction": 0.60317462682724,
"avg_line_length": 29.15217399597168,
"blob_id": "ee909203404cd817f308bf55ae7d239b47e96404",
"content_id": "364a92fee3451556130e63cc00bf1b3a3e5a961b",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 1386,
"license_type": "no_license",
"max_line_length": 92,
"num_lines": 46,
"path": "/Python/NumberOfDiscIntersections.py",
"repo_name": "istiophorus/Codility",
"src_encoding": "UTF-8",
"text": "# Task: https://app.codility.com/programmers/lessons/6-sorting/number_of_disc_intersections/\n# Results: https://app.codility.com/demo/results/trainingUKDRUY-65Z/\n\nfrom collections import defaultdict\n\ndef NumberOfDiscIntersections(arr):\n disks = [(ix - x, ix + x) for ix, x in enumerate(arr)]\n edges = []\n disksBegin = defaultdict(list)\n disksEnd = defaultdict(list)\n \n intersections = 0\n currentlyOpenDisks = 0\n \n for d in disks:\n e1,e2 = d\n edges.append(e1)\n edges.append(e2)\n \n disksBegin[e1].append(d)\n disksEnd[e2].append(d)\n \n edges = sorted(list(set(edges)))\n \n for e in edges:\n openingDisks = len(disksBegin[e])\n\n if (openingDisks > 0 and currentlyOpenDisks > 0):\n intersections = intersections + currentlyOpenDisks * openingDisks\n if intersections > 10000000:\n return -1\n \n currentlyOpenDisks = currentlyOpenDisks + openingDisks\n \n if (openingDisks > 1):\n intersections = intersections + openingDisks * (openingDisks - 1) // 2\n if intersections > 10000000:\n return -1\n \n closingDisks = len(disksEnd[e])\n \n currentlyOpenDisks = currentlyOpenDisks - closingDisks\n \n return intersections\n\n#NumberOfDiscIntersections([1,5,2,1,4,0])"
},
{
"alpha_fraction": 0.5095541477203369,
"alphanum_fraction": 0.524840772151947,
"avg_line_length": 20.83333396911621,
"blob_id": "0cbda60b523fc85d2d46169aeef0bc8227c40445",
"content_id": "528992a25592d76436d0ed4fba3ba116e781334d",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 785,
"license_type": "no_license",
"max_line_length": 82,
"num_lines": 36,
"path": "/Python/MaxProfit.py",
"repo_name": "istiophorus/Codility",
"src_encoding": "UTF-8",
"text": "# MaxProfit\n# https://app.codility.com/programmers/lessons/9-maximum_slice_problem/max_profit/\n# https://app.codility.com/demo/results/trainingEWJ9TM-QJH/\n\ndef solution(arr):\n if None == arr:\n raise ValueError\n \n arrLen = len(arr)\n \n if arrLen < 2:\n return 0\n \n maxArr = [0] * arrLen\n \n maxValue = 0\n \n for ix in range(arrLen - 1, -1, -1):\n currentValue = arr[ix]\n if currentValue > maxValue:\n maxValue = currentValue\n \n maxArr[ix] = maxValue\n \n if maxValue <= 0:\n return 0\n \n maxResult = 0\n \n for ix in range(arrLen):\n diff = maxArr[ix] - arr[ix]\n \n if diff > maxResult:\n maxResult = diff\n \n return maxResult"
},
{
"alpha_fraction": 0.5059808492660522,
"alphanum_fraction": 0.5215311050415039,
"avg_line_length": 21,
"blob_id": "7fee94f6a9625c68ec729127a6bc1bacaf17202f",
"content_id": "08306477bc22a61c4daba9f53149a330dc815add",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 836,
"license_type": "no_license",
"max_line_length": 92,
"num_lines": 38,
"path": "/Python/CommonPrimeDivisors84.py",
"repo_name": "istiophorus/Codility",
"src_encoding": "UTF-8",
"text": "\n#\n# https://app.codility.com/programmers/lessons/12-euclidean_algorithm/common_prime_divisors/\n# https://app.codility.com/demo/results/trainingKEBYGS-74H/\n#\n\ndef UniquePrimeDivisors(x):\n result = set()\n divisor = 2\n while divisor < x:\n if x % divisor == 0:\n result.add(divisor)\n x = x / divisor\n else:\n divisor += 1\n result.add(int(x))\n return result\n\ndef solution(a, b):\n if a == None:\n raise ValueError()\n \n if b == None:\n raise ValueError()\n \n if len(a) != len(b):\n raise ValueError()\n \n counter = 0\n \n for ix, xa in enumerate(a):\n xb = b[ix]\n s1 = UniquePrimeDivisors(xa)\n s2 = UniquePrimeDivisors(xb)\n \n if s1 == s2:\n counter += 1\n \n return counter"
},
{
"alpha_fraction": 0.5976454019546509,
"alphanum_fraction": 0.6405817270278931,
"avg_line_length": 19.927536010742188,
"blob_id": "d5b586504542748b9825ccc1ee385353606805d8",
"content_id": "86cc5bfb6db2f472bd863ad6d5a654e7b84836e4",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "C#",
"length_bytes": 1446,
"license_type": "no_license",
"max_line_length": 101,
"num_lines": 69,
"path": "/Codility.Training.Tests/GenomicRangeQueryTest.cs",
"repo_name": "istiophorus/Codility",
"src_encoding": "UTF-8",
"text": "using System;\nusing System.Collections.Generic;\nusing System.Linq;\nusing System.Text;\nusing System.Threading.Tasks;\nusing Microsoft.VisualStudio.TestTools.UnitTesting;\n\nnamespace Codility.Training.Tests\n{\n\t[TestClass]\n\tpublic sealed class GenomicRangeQueryTest\n\t{\n\t\tprivate readonly GenomicRangeQuery _target = new GenomicRangeQuery();\n\n\t\t[TestMethod]\n\t\tpublic void TestNull()\n\t\t{\n\t\t\tInt32[] result = _target.Solve(\"ACGT\", null, null);\n\n\t\t\tAssert.IsNotNull(result);\n\n\t\t\tAssert.AreEqual(0, result.Length);\n\t\t}\n\n\t\t[TestMethod]\n\t\tpublic void TestEmpty()\n\t\t{\n\t\t\tInt32[] result = _target.Solve(\"ACGT\", new Int32[0], new Int32[0]);\n\n\t\t\tAssert.IsNotNull(result);\n\n\t\t\tAssert.AreEqual(0, result.Length);\n\t\t}\n\n\t\t[TestMethod]\n\t\tpublic void SampleTest()\n\t\t{\n\t\t\tInt32[] result = _target.Solve(\"CAGCCTA\", new Int32[] { 2, 5, 0 }, new Int32[] { 4, 5, 6});\n\n\t\t\tAssert.IsNotNull(result);\n\n\t\t\tInt32[] expected = new Int32[] { 2, 4, 1 };\n\n\t\t\tAssert.AreEqual(3, result.Length);\n\n\t\t\tfor (Int32 q = 0; q < result.Length; q++)\n\t\t\t{\n\t\t\t\tAssert.AreEqual(expected[q], result[q]);\n\t\t\t}\n\t\t}\n\n\t\t[TestMethod]\n\t\tpublic void SampleTest2()\n\t\t{\n\t\t\tInt32[] result = _target.Solve(\"CAGCCTA\", new Int32[] { 0, 0, 0, 2 }, new Int32[] { 0, 1, 2, 6 });\n\n\t\t\tAssert.IsNotNull(result);\n\n\t\t\tInt32[] expected = new Int32[] { 2, 1, 1, 1 };\n\n\t\t\tAssert.AreEqual(4, result.Length);\n\n\t\t\tfor (Int32 q = 0; q < result.Length; q++)\n\t\t\t{\n\t\t\t\tAssert.AreEqual(expected[q], result[q]);\n\t\t\t}\n\t\t}\n\t}\n}\n"
},
{
"alpha_fraction": 0.5080240964889526,
"alphanum_fraction": 0.5346038341522217,
"avg_line_length": 13.992481231689453,
"blob_id": "bbe1f1b1b4adc514ef5b04b5269dcd9e406d1cc5",
"content_id": "bf7846fb26a81654fe36c4715ed340a466405b7f",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "C#",
"length_bytes": 1996,
"license_type": "no_license",
"max_line_length": 69,
"num_lines": 133,
"path": "/Codility.Training/ChocolatesByNumbers.cs",
"repo_name": "istiophorus/Codility",
"src_encoding": "UTF-8",
"text": "using System;\nusing System.Collections.Generic;\nusing System.Linq;\nusing System.Text;\nusing System.Threading.Tasks;\n\nnamespace Codility.Training\n{\n\t/// <summary>\n\t/// my solution of the codility task:\n\t/// https://codility.com/demo/take-sample-test/chocolates_by_numbers\n\t/// with results\n\t/// https://codility.com/demo/results/demoX7HBP9-4NJ/\n\t/// </summary>\n\tpublic sealed class ChocolatesByNumbers\n\t{\n\t\tpublic Int32 Solve(Int32 n, Int32 m)\n\t\t{\n\t\t\treturn (Int32)SolveImpl(n, m);\n\t\t}\n\n\t\tprivate Int64 SolveImpl(Int64 n, Int64 m)\n\t\t{\n\t\t\tConsole.WriteLine(\"{0} {1}\", n, m);\n\n\t\t\tif (n <= 0)\n\t\t\t{\n\t\t\t\tthrow new ArgumentOutOfRangeException(\"n\");\n\t\t\t}\n\n\t\t\tif (m <= 0)\n\t\t\t{\n\t\t\t\tthrow new ArgumentOutOfRangeException(\"m\");\n\t\t\t}\n\n\t\t\tif (n >= m)\n\t\t\t{\n\t\t\t\tif (n % m == 0)\n\t\t\t\t{\n\t\t\t\t\treturn n / m;\n\t\t\t\t}\n\t\t\t\telse\n\t\t\t\t{\n\t\t\t\t\tInt64 mult = (Int64)LeastCommonMultiplication(n, m);\n\n\t\t\t\t\treturn (mult / m);\n\t\t\t\t}\n\t\t\t}\n\t\t\telse\n\t\t\t{\n\t\t\t\tif (m % n == 0)\n\t\t\t\t{\n\t\t\t\t\treturn 1;\n\t\t\t\t}\n\t\t\t\telse\n\t\t\t\t{\n\t\t\t\t\treturn SolveImpl(n, m % n);\n\t\t\t\t}\n\t\t\t}\n\t\t}\n\n\t\tpublic static Int64 LargestCommonDivisor(Int64 a, Int64 b)\n\t\t{\n\t\t\tif (a <= 0)\n\t\t\t{\n\t\t\t\tthrow new ArgumentOutOfRangeException(\"a\");\n\t\t\t}\n\n\t\t\tif (b <= 0)\n\t\t\t{\n\t\t\t\tthrow new ArgumentOutOfRangeException(\"b\");\n\t\t\t}\n\n\t\t\tif (a == b)\n\t\t\t{\n\t\t\t\treturn a;\n\t\t\t}\n\n\t\t\tif (a > b)\n\t\t\t{\n\t\t\t\tif (a % b == 0)\n\t\t\t\t{\n\t\t\t\t\treturn b;\n\t\t\t\t}\n\n\t\t\t\treturn LargestCommonDivisor(a % b, b);\n\t\t\t}\n\t\t\telse\n\t\t\t{\n\t\t\t\tif (b % a == 0)\n\t\t\t\t{\n\t\t\t\t\treturn a;\n\t\t\t\t}\n\n\t\t\t\treturn LargestCommonDivisor(b % a, a);\n\t\t\t}\n\t\t}\n\n\t\tpublic static Int64 LeastCommonMultiplication(Int64 a, Int64 b)\n\t\t{\n\t\t\tif (a <= 0)\n\t\t\t{\n\t\t\t\tthrow new ArgumentNullException(\"a\");\n\t\t\t}\n\n\t\t\tif (b <= 0)\n\t\t\t{\n\t\t\t\tthrow new ArgumentOutOfRangeException(\"b\");\n\t\t\t}\n\n\t\t\tif (a == b)\n\t\t\t{\n\t\t\t\treturn a;\n\t\t\t}\n\n\t\t\tif (a >= b)\n\t\t\t{\n\t\t\t\tif (a % b == 0)\n\t\t\t\t{\n\t\t\t\t\treturn a;\n\t\t\t\t}\n\t\t\t\telse\n\t\t\t\t{\n\t\t\t\t\treturn (Int64)a * (Int64)b / (Int64)LargestCommonDivisor(a, b);\n\t\t\t\t}\n\t\t\t}\n\t\t\telse\n\t\t\t{\n\t\t\t\treturn LeastCommonMultiplication(b, a);\n\t\t\t}\n\t\t}\n\t}\n}\n"
},
{
"alpha_fraction": 0.5966386795043945,
"alphanum_fraction": 0.6190476417541504,
"avg_line_length": 17.465517044067383,
"blob_id": "ff7eff77216f6e5f751aa0aef8e322cdeba6dde1",
"content_id": "ffa8819b0c49e6fec4d21fd77a27820732b62d0b",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "C#",
"length_bytes": 1073,
"license_type": "no_license",
"max_line_length": 62,
"num_lines": 58,
"path": "/Codility.Training/FrogRiverOne.cs",
"repo_name": "istiophorus/Codility",
"src_encoding": "UTF-8",
"text": "using System;\nusing System.Collections.Generic;\nusing System.Linq;\nusing System.Text;\nusing System.Threading.Tasks;\n\nnamespace Codility.Training\n{\n\t/// <summary>\n\t/// my solution of the task FrogRiverOne\n\t/// https://codility.com/demo/take-sample-test/frog_river_one\n\t/// and here are results\n\t/// https://codility.com/demo/results/demo6KZW2R-W9A/\n\t/// </summary>\n\tpublic sealed class FrogRiverOne\n\t{\n\t\tpublic Int32 Solve(Int32 x, Int32[] input)\n\t\t{\n\t\t\tif (null == input)\n\t\t\t{\n\t\t\t\tthrow new ArgumentNullException(\"input\");\n\t\t\t}\n\n\t\t\tif (x <= 0)\n\t\t\t{\n\t\t\t\tthrow new ArgumentOutOfRangeException(\"x\");\n\t\t\t}\n\n\t\t\tHashSet<Int32> leaves = new HashSet<Int32>();\n\n\t\t\tInt32 steps = 0;\n\n\t\t\tfor (Int32 q = 0, qMax = input.Length; q < qMax; q++)\n\t\t\t{\n\t\t\t\tInt32 currentLeave = input[q];\n\n\t\t\t\tif (currentLeave < 1 || currentLeave > x)\n\t\t\t\t{\n\t\t\t\t\tthrow new ArgumentOutOfRangeException(\"currentLeave\");\n\t\t\t\t}\n\n\t\t\t\tif (!leaves.Contains(currentLeave))\n\t\t\t\t{\n\t\t\t\t\tleaves.Add(currentLeave);\n\n\t\t\t\t\tsteps++;\n\n\t\t\t\t\tif (steps >= x)\n\t\t\t\t\t{\n\t\t\t\t\t\treturn q;\n\t\t\t\t\t}\n\t\t\t\t}\n\t\t\t}\n\n\t\t\treturn -1;\n\t\t}\n\t}\n}\n"
},
{
"alpha_fraction": 0.6251935958862305,
"alphanum_fraction": 0.6417139768600464,
"avg_line_length": 19.38947296142578,
"blob_id": "0b8e4fb073fe0f596557e0392c4f7ffb02854b24",
"content_id": "d39d37033b7e89c7217cd287f389b97cbed5b9b0",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "C#",
"length_bytes": 1939,
"license_type": "no_license",
"max_line_length": 74,
"num_lines": 95,
"path": "/Codility.Training/MinAvgTwoSlice.cs",
"repo_name": "istiophorus/Codility",
"src_encoding": "UTF-8",
"text": "using System;\nusing System.Collections.Generic;\nusing System.Linq;\nusing System.Text;\nusing System.Threading.Tasks;\n\nnamespace Codility.Training\n{\n\t/// <summary>\n\t/// my solution of the task FrogRiverOne\n\t/// https://codility.com/demo/take-sample-test/min_avg_two_slice\n\t/// and here are results\n\t/// https://codility.com/demo/results/demo8WQ2PM-NMU/\n\t/// </summary>\n\tpublic sealed class MinAvgTwoSlice\n\t{\n\t\tpublic static Int32 Solve(Int32[] input)\n\t\t{\n\t\t\tif (null == input)\n\t\t\t{\n\t\t\t\tthrow new ArgumentNullException(\"input\");\n\t\t\t}\n\n\t\t\tif (input.Length < 2)\n\t\t\t{\n\t\t\t\tthrow new ArgumentException(\"input is empty\");\n\t\t\t}\n\n\t\t\tInt32 sliceStartStop = input.Length - 1;\n\n\t\t\tInt32 minSliceIndex = 0;\n\n\t\t\tInt32 sliceStart = 0;\n\n\t\t\tInt32 sliceEnd = 1;\n\n\t\t\tInt64 currentSliceSum = input[sliceStart] + input[sliceEnd];\n\n\t\t\tInt32 currentSliceLength = sliceEnd - sliceStart + 1;\n\n\t\t\tDouble minSliceAvg = (currentSliceSum * 1.0) / currentSliceLength;\n\n\t\t\twhile (sliceEnd < input.Length && sliceStart < sliceStartStop)\n\t\t\t{\n\t\t\t\tif (currentSliceLength == 2)\n\t\t\t\t{\n\t\t\t\t\tif (sliceEnd < sliceStartStop) //// there can be a longer slice\n\t\t\t\t\t{\n\t\t\t\t\t\tsliceEnd++;\n\n\t\t\t\t\t\tcurrentSliceLength++;\n\n\t\t\t\t\t\tcurrentSliceSum += input[sliceEnd];\n\n\t\t\t\t\t\tDouble currentSliceAvg = currentSliceSum * 1.0 / currentSliceLength;\n\n\t\t\t\t\t\tif (currentSliceAvg < minSliceAvg)\n\t\t\t\t\t\t{\n\t\t\t\t\t\t\tminSliceAvg = currentSliceAvg;\n\n\t\t\t\t\t\t\tminSliceIndex = sliceStart;\n\t\t\t\t\t\t}\n\t\t\t\t\t}\n\t\t\t\t\telse\n\t\t\t\t\t{\n\t\t\t\t\t\tbreak;\n\t\t\t\t\t}\n\t\t\t\t}\n\t\t\t\telse if (currentSliceLength == 3)\n\t\t\t\t{\n\t\t\t\t\tcurrentSliceSum -= input[sliceStart];\n\n\t\t\t\t\tsliceStart++;\n\n\t\t\t\t\tcurrentSliceLength--;\n\n\t\t\t\t\tDouble currentSliceAvg = currentSliceSum * 1.0 / currentSliceLength;\n\n\t\t\t\t\tif (currentSliceAvg < minSliceAvg)\n\t\t\t\t\t{\n\t\t\t\t\t\tminSliceAvg = currentSliceAvg;\n\n\t\t\t\t\t\tminSliceIndex = sliceStart;\n\t\t\t\t\t}\n\t\t\t\t}\n\t\t\t\telse\n\t\t\t\t{\n\t\t\t\t\tthrow new ApplicationException(\"How did it happen ??\");\n\t\t\t\t}\n\t\t\t}\n\n\t\t\treturn minSliceIndex;\n\t\t}\n\t}\n}\n"
},
{
"alpha_fraction": 0.46357616782188416,
"alphanum_fraction": 0.5121412873268127,
"avg_line_length": 22.842105865478516,
"blob_id": "59335d7f10da6c56913ea38a88f07e1f688948be",
"content_id": "4c8d3b67b7cd975ff0d264debc3c856935fcfd2a",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 453,
"license_type": "no_license",
"max_line_length": 74,
"num_lines": 19,
"path": "/Python/PassingCars.py",
"repo_name": "istiophorus/Codility",
"src_encoding": "UTF-8",
"text": "\n# PassingCars\n# https://app.codility.com/programmers/lessons/5-prefix_sums/passing_cars/\n# https://app.codility.com/demo/results/trainingRXF888-6T2/\ndef solution(arr):\n if arr == []:\n return 0\n \n result = 0\n zeros = 0\n \n for x in arr:\n if x == 0:\n zeros += 1\n else:\n if result > 1000000000 - zeros:\n return -1\n result += zeros \n \n return result"
},
{
"alpha_fraction": 0.6038751602172852,
"alphanum_fraction": 0.6232507824897766,
"avg_line_length": 17.215686798095703,
"blob_id": "1480012b35df6930adf6ed8b39a77307ae1ab31f",
"content_id": "b23689e0c66e1fa9d251ffa4897483579d2fb98f",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "C#",
"length_bytes": 931,
"license_type": "no_license",
"max_line_length": 59,
"num_lines": 51,
"path": "/Codility.Training/PermCheck.cs",
"repo_name": "istiophorus/Codility",
"src_encoding": "UTF-8",
"text": "using System;\nusing System.Collections.Generic;\nusing System.Linq;\nusing System.Text;\nusing System.Threading.Tasks;\n\nnamespace Codility.Training\n{\n\t/// <summary>\n\t/// solution of the Codility task\n\t/// https://codility.com/demo/take-sample-test/perm_check\n\t/// and my results\n\t/// https://codility.com/demo/results/demoG8UJRV-RAD/\n\t/// </summary>\n\tpublic sealed class PermCheck\n\t{\n\t\tpublic Int32 Solve(Int32[] elements)\n\t\t{\n\t\t\tif (null == elements)\n\t\t\t{\n\t\t\t\tthrow new ArgumentNullException(\"elements\");\n\t\t\t}\n\n\t\t\tif (elements.Length <= 0)\n\t\t\t{\n\t\t\t\treturn 0;\n\t\t\t}\n\n\t\t\tBoolean[] flags = new Boolean[elements.Length];\n\n\t\t\tfor (Int32 q = 0, qMax = elements.Length; q < qMax; q++)\n\t\t\t{\n\t\t\t\tInt32 currentItem = elements[q];\n\n\t\t\t\tif (currentItem <= 0 || currentItem > elements.Length)\n\t\t\t\t{\n\t\t\t\t\treturn 0;\n\t\t\t\t}\n\n\t\t\t\tif (flags[currentItem - 1])\n\t\t\t\t{\n\t\t\t\t\treturn 0;\n\t\t\t\t}\n\n\t\t\t\tflags[currentItem - 1] = true;\n\t\t\t}\n\n\t\t\treturn 1;\n\t\t}\n\t}\n}\n"
},
{
"alpha_fraction": 0.6077669858932495,
"alphanum_fraction": 0.6514562964439392,
"avg_line_length": 17.727272033691406,
"blob_id": "212ea78bcbe4a2830b3d0b7675dc665833529446",
"content_id": "36be0a547325576735dd46af8b33d25265283379",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "C#",
"length_bytes": 1032,
"license_type": "no_license",
"max_line_length": 72,
"num_lines": 55,
"path": "/Codility.Training.Tests/DominatorTests.cs",
"repo_name": "istiophorus/Codility",
"src_encoding": "UTF-8",
"text": "using System;\nusing System.Collections.Generic;\nusing System.Linq;\nusing System.Text;\nusing System.Threading.Tasks;\nusing Microsoft.VisualStudio.TestTools.UnitTesting;\n\nnamespace Codility.Training.Tests\n{\n\t[TestClass]\n\tpublic sealed class DominatorTests\n\t{\n\t\tprivate readonly Dominator _target = new Dominator();\n\n\t\t[TestMethod]\n\t\tpublic void Test01()\n\t\t{\n\t\t\tInt32 index = _target.Solve(new Int32[] { 3, 4, 3, 2, 3, -1, 3, 3 });\n\n\t\t\tAssert.AreEqual(7, index);\n\t\t}\n\n\t\t[TestMethod]\n\t\tpublic void TestEmpty()\n\t\t{\n\t\t\tInt32 index = _target.Solve(new Int32[] { });\n\n\t\t\tAssert.AreEqual(-1, index);\n\t\t}\n\n\t\t[TestMethod]\n\t\tpublic void TestSingle()\n\t\t{\n\t\t\tInt32 index = _target.Solve(new Int32[] { 1 });\n\n\t\t\tAssert.AreEqual(0, index);\n\t\t}\n\n\t\t[TestMethod]\n\t\tpublic void TestNoDominator()\n\t\t{\n\t\t\tInt32 index = _target.Solve(new Int32[] { 1, 2, 3, 4});\n\n\t\t\tAssert.AreEqual(-1, index);\n\t\t}\n\n\t\t[TestMethod]\n\t\tpublic void TestNoDominator2()\n\t\t{\n\t\t\tInt32 index = _target.Solve(new Int32[] { 1, 2, 2, 4 });\n\n\t\t\tAssert.AreEqual(-1, index);\n\t\t}\n\t}\n}\n"
}
] | 58 |
mimn97/Numerical-Analysis
|
https://github.com/mimn97/Numerical-Analysis
|
3d361931f6e846a297d2874ef2073e06e113fc1d
|
ccd450b0b3d0bda5e56a7980fce34bfbe4e54d63
|
44f5927be62b9e86054a900543a3c3cd36352acc
|
refs/heads/main
| 2022-12-25T06:51:18.742598 | 2020-10-07T09:06:26 | 2020-10-07T09:06:26 | 301,978,526 | 0 | 0 | null | null | null | null | null |
[
{
"alpha_fraction": 0.41647180914878845,
"alphanum_fraction": 0.4749285578727722,
"avg_line_length": 28.3435115814209,
"blob_id": "dc132e7c362d9f3f7bd78eb16cd363238c572b65",
"content_id": "90cc7b0fd6fadb664581ecf1133d7cde37410ca3",
"detected_licenses": [
"MIT"
],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 3849,
"license_type": "permissive",
"max_line_length": 111,
"num_lines": 131,
"path": "/PC.py",
"repo_name": "mimn97/Numerical-Analysis",
"src_encoding": "UTF-8",
"text": "from scipy.integrate import solve_ivp\nimport numpy as np\nimport matplotlib.pyplot as plt\n\n\n# Milne-Simpson PC method\n\ndef milnePC(def_fn, xa, xb, ya, N):\n\n f = def_fn # intakes function to method to approximate\n\n h = (xb - xa) / N # creates step size based on input values of a, b, N\n\n t = np.arange(xa, xb + h, h) # array initialized to hold mesh points t\n\n y = np.zeros((N + 1,)) # array to hold Midpoint Method approximated y values\n\n y[0] = ya # initial condition\n\n # using RK4 to obtain the first 3 points\n\n for i in range(0, N):\n if i in range(0, 3):\n k1 = h * f(t[i], y[i])\n k2 = h * f(t[i] + (h / 2.0), y[i] + (k1 / 2.0))\n k3 = h * f(t[i] + (h / 2.0), y[i] + (k2 / 2.0))\n k4 = h * f(t[i] + h, y[i] + k3)\n\n y[i + 1] = y[i] + (k1 + 2.0 * k2 + 2.0 * k3 + k4) / 6.0\n\n else:\n y[i + 1] = y[i-3] + (4*h/3)*(2*f(t[i], y[i]) - f(t[i-1], y[i-1])\n + 2*f(t[i-2], y[i-2]))\n\n y[i + 1] = y[i-1] + (h/3)*(f(t[i+1], y[i+1]) + 4*f(t[i], y[i])\n + f(t[i-1], y[i-1]))\n\n return t, y\n\n\n# Adams Fourth Order PC\n\ndef adamsPC(def_fn, xa, xb, ya, h):\n f = def_fn # intakes function to method to approximate\n\n N = int((xb - xa) / h) # creates step size based on input values of a, b, N\n\n t = np.arange(xa, xb + h, h) # array intialized to hold mesh points t\n\n y = np.zeros((N + 1,)) # array to hold Midpoint Method approximated y values\n\n y[0] = ya # initial condition\n\n # using RK4 to obtain the first 3 points\n for i in range(0, N):\n if i in range(0, 3):\n k1 = h * f(t[i], y[i])\n k2 = h * f(t[i] + (h / 2.0), y[i] + (k1 / 2.0))\n k3 = h * f(t[i] + (h / 2.0), y[i] + (k2 / 2.0))\n k4 = h * f(t[i] + h, y[i] + k3)\n\n y[i + 1] = y[i] + (k1 + 2.0 * k2 + 2.0 * k3 + k4) / 6.0\n\n else:\n\n y[i + 1] = y[i] + (h/24.0) * (55.0 * f(t[i], y[i]) - 59.0 * f(t[i - 1], y[i - 1])\n + 37.0 * f(t[i - 2], y[i - 2]) - 9.0 * f(t[i - 3], y[i - 3]))\n\n y[i + 1] = y[i] + (h/24.0) * (9.0 * f(t[i + 1], y[i + 1])\n + 19.0 * f(t[i],y[i]) - 5.0 * f(t[i - 1], y[i - 1]) + f(t[i - 2], y[i - 2]))\n\n return t, y\n\n\nif __name__ == \"__main__\":\n\n d_f = lambda x, y: (2 - 2*x*y)/(x**2 + 1)\n f = lambda x: (2*x + 1)/(x**2 + 1)\n x_1 = np.arange(0, 1.1, 0.1)\n x_2 = np.arange(0, 1.05, 0.05)\n\n x_milne_1, result_milne_1 = milnePC(d_f, 0, 1, 1, 10)\n x_milne_2, result_milne_2 = milnePC(d_f, 0, 1, 1, 20)\n\n x_adam_1, result_adam_1 = adamsPC(d_f, 0, 1, 1, 0.1)\n x_adam_2, result_adam_2 = adamsPC(d_f, 0, 1, 1, 0.05)\n\n y_exact_1 = f(x_1)\n y_exact_2 = f(x_2)\n\n print(result_adam_1)\n\n err_milne_1 = np.abs(y_exact_1 - result_milne_1)\n err_adam_1 = np.abs(y_exact_1 - result_adam_1)\n err_milne_2 = np.abs(y_exact_2 - result_milne_2)\n err_adam_2 = np.abs(y_exact_2 - result_adam_2)\n\n print(err_adam_1)\n print(err_adam_2)\n\n for i in range(len(err_adam_1)):\n print(err_adam_1[i] / err_adam_2[i*2])\n\n\n print(err_milne_1)\n print(err_milne_2)\n\n for i in range(len(err_milne_1)):\n print(err_milne_1[i] / err_milne_2[i*2])\n\n plt.figure(1)\n\n plt.plot(x_1, err_adam_1, label='ABM4')\n plt.plot(x_1, err_milne_1, label='Milne-Simpson')\n #plt.plot(x_2, err_adam_2, label='h=0.05')\n\n plt.xlabel('t')\n plt.ylabel('Absolute Error')\n plt.title('Stability Comparison when h = 0.1')\n plt.legend()\n\n plt.figure(2)\n\n plt.plot(x_1, err_milne_1, label='h=0.1')\n plt.plot(x_2, err_milne_2, label='h=0.05')\n plt.xlabel('t')\n plt.ylabel('Absolute Error')\n plt.title('Milne-Simpson Predictor-Corrector')\n plt.legend()\n\n plt.show()\n\n\n\n\n\n"
},
{
"alpha_fraction": 0.7937853336334229,
"alphanum_fraction": 0.805084764957428,
"avg_line_length": 43.125,
"blob_id": "e2ea3b5541ca491ae13893451228d1f2946a7190",
"content_id": "bacbd5de208195ed8ed2f44b6c0f7936af0b646e",
"detected_licenses": [
"MIT"
],
"is_generated": false,
"is_vendor": false,
"language": "Markdown",
"length_bytes": 354,
"license_type": "permissive",
"max_line_length": 105,
"num_lines": 8,
"path": "/README.md",
"repo_name": "mimn97/Numerical-Analysis",
"src_encoding": "UTF-8",
"text": "# Numerical Analysis Final Project - Predictor Corrector Method\n\nThis is created for final project for the course Numerical Analysis in Spring 2020. \nThe purpose of this project is to examine Predictor-Corrector methods for Initial Value Problems (IVPs). \n\n## PC.py \n\nA Python Numpy program for implementing several types of Predictor-Corrector methods \n"
}
] | 2 |
arkhipus/RnD
|
https://github.com/arkhipus/RnD
|
808d9fcb359b2fbb126876e918cd05adceb848bf
|
3ecd76daf1118e04d1a897eb835cacc30ae834f6
|
674501911489ec3602176966e6f305390bd16bce
|
refs/heads/master
| 2022-03-08T08:55:07.804156 | 2022-02-23T05:42:03 | 2022-02-23T05:42:03 | 13,822,873 | 0 | 0 | null | null | null | null | null |
[
{
"alpha_fraction": 0.5921052694320679,
"alphanum_fraction": 0.5947368144989014,
"avg_line_length": 16.272727966308594,
"blob_id": "d997db7eeaebeb30fc26b70041e1c28ea1651711",
"content_id": "930ed5f22392964d7f848016a2fa3b2f993e4533",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "C#",
"length_bytes": 382,
"license_type": "no_license",
"max_line_length": 39,
"num_lines": 22,
"path": "/ProtoStudy/Models/UserRequest.cs",
"repo_name": "arkhipus/RnD",
"src_encoding": "UTF-8",
"text": "using System;\nusing System.Collections.Generic;\nusing System.Linq;\nusing System.Text;\nusing ProtoBuf;\n\nnamespace Models\n{\n [ProtoContract]\n public class UserRequest\n {\n [ProtoMember(1)]\n public int UserId { get; set; }\n\n private UserRequest() { }\n\n public UserRequest(int userId)\n {\n this.UserId = userId;\n }\n }\n}\n"
},
{
"alpha_fraction": 0.5383502244949341,
"alphanum_fraction": 0.5441389083862305,
"avg_line_length": 21.29032325744629,
"blob_id": "4280d5201d68d0d8c7785ef7a297f0f4fef5cef0",
"content_id": "3f14a52eab95ebb0948f6f02ac580d0a6a8564ac",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "C#",
"length_bytes": 693,
"license_type": "no_license",
"max_line_length": 87,
"num_lines": 31,
"path": "/ProtoStudy/Models/User.cs",
"repo_name": "arkhipus/RnD",
"src_encoding": "UTF-8",
"text": "using System.Collections.Generic;\nusing ProtoBuf;\n\nnamespace Models\n{\n [ProtoContract]\n public class User\n {\n [ProtoMember(1)]\n public int Id { get; private set; }\n\n [ProtoMember(2)]\n public string Name { get; set; }\n\n [ProtoMember(3)]\n public bool Active { get; set; }\n\n [ProtoMember(4)]\n public IDictionary<string, string> Metadata { get; private set; }\n\n private User() { }\n\n public User(int id, string name, bool active, IDictionary<string, string> meta)\n {\n this.Id = id;\n this.Name = name;\n this.Active = active;\n this.Metadata = meta;\n }\n }\n}\n"
},
{
"alpha_fraction": 0.661057710647583,
"alphanum_fraction": 0.7415865659713745,
"avg_line_length": 40.5,
"blob_id": "0c7f81c71075d2bf9910b27083e8788e6b586846",
"content_id": "1c39aa08b529872b3397b70766fc72252792b76f",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Markdown",
"length_bytes": 832,
"license_type": "no_license",
"max_line_length": 79,
"num_lines": 20,
"path": "/CachingComp/readme.md",
"repo_name": "arkhipus/RnD",
"src_encoding": "UTF-8",
"text": "## CachingComp\n\nA benchmark comparison of four different approaches to distributed caching: \n1. Redis with Twemproxy (https://github.com/twitter/twemproxy) \n2. Redis sharding with Consistent Hashing \n3. Memcached with Twemproxy \n4. Memcached sharding with Consistent Hashing \n\nTypical results via relatively slow wi-fi network and 8-node virtual cluster: \n\n10000 iterations of each: \nredis_proxy writes: 42.94 sec, reads: 29.88 sec, hit ratio: 100.00% \nmemcached_proxy writes: 30.98 sec, reads: 30.69 sec, hit ratio: 100.00% \nsharded_redis writes: 33.50 sec, reads: 32.96 sec, hit ratio: 100.00% \nsharded_memcached writes: 33.12 sec, reads: 32.20 sec, hit ratio: 100.00% \n\n### Dependencies:\n* https://github.com/andymccurdy/redis-py \n* https://github.com/linsomniac/python-memcached \n* http://amix.dk/blog/post/19367 \n"
},
{
"alpha_fraction": 0.5206167697906494,
"alphanum_fraction": 0.52512127161026,
"avg_line_length": 32.75438690185547,
"blob_id": "edab94afc26a6e3fed8636f6dc0608a41a638e28",
"content_id": "a004612052996b6ed5e4ac84712d1179744247e5",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "C#",
"length_bytes": 5774,
"license_type": "no_license",
"max_line_length": 138,
"num_lines": 171,
"path": "/CassandraStudy/CassandraStudy/Dal.cs",
"repo_name": "arkhipus/RnD",
"src_encoding": "UTF-8",
"text": "using System;\nusing System.Linq;\nusing System.Collections.Generic;\nusing CassandraSharp;\nusing CassandraSharp.Config;\nusing CassandraSharp.CQL;\nusing CassandraSharp.CQLPoco;\nusing CassandraSharp.CQLPropertyBag;\nusing System.Diagnostics;\nusing System.Threading;\nusing CassandraStudy.Schemas;\nusing System.Threading.Tasks;\n//using CassandraSharp.Extensibility;\n\nnamespace CassandraStudy\n{\n internal class Dal : IDal\n {\n\n\n public Dal()\n {\n XmlConfigurator.Configure();\n }\n\n public IEnumerable<IDictionary<string, object>> GetUsers(string startKey, int slice)\n {\n using (ICluster cluster = ClusterManager.GetCluster(\"TestCassandra\"))\n {\n var cmd = cluster.CreatePropertyBagCommand();\n string cqlUsers = string.IsNullOrEmpty(startKey) ?\n string.Format(\"SELECT * FROM dispatch_cql3.users LIMIT {0}\", slice) :\n string.Format(\"SELECT * FROM dispatch_cql3.users WHERE token(uid) > token('{0}') LIMIT {1}\", startKey, slice);\n\n var users = cmd.Execute<IDictionary<string, object>>(cqlUsers).AsFuture();\n users.Wait();\n\n return users.Result;\n }\n }\n\n public IEnumerable<IDictionary<string, object>> GetUser(string uid)\n {\n throw new NotImplementedException();\n }\n\n public IDictionary<string, object> GetUserInFlow(string uid, string flow)\n {\n throw new NotImplementedException();\n }\n\n public string GetLastStateForUser(string uid, string flow)\n {\n throw new NotImplementedException();\n }\n\n public IEnumerable<IDictionary<string, object>> GetFlows(string startKey, int slice)\n {\n throw new NotImplementedException();\n }\n\n public IDictionary<string, object> GetFlow(string flow)\n {\n throw new NotImplementedException();\n }\n\n public int UpdateLastStateForUser(string uid, string lastState)\n {\n throw new NotImplementedException();\n }\n\n public int AddUser(IDictionary<string, object> user)\n {\n throw new NotImplementedException();\n }\n\n public int UpdateUser(IDictionary<string, object> user)\n {\n throw new NotImplementedException();\n }\n\n public Tuple<int, long> GenerateUsers(int num)\n {\n int count = 0;\n Random rnd = new Random();\n string[] flows = GetAllFlows().ToArray();\n const string chars = \"ABCDEFGHIJKLMNOPQRSTUVWXYZ\";\n Guid[] ids = GenerateIds((int)Math.Ceiling(num * 0.8)).ToArray();\n //Guid[] ids = GenerateIds(num).ToArray();\n\n Stopwatch st = new Stopwatch();\n\n using (ICluster cluster = ClusterManager.GetCluster(\"TestCassandra\"))\n {\n var cmd = cluster.CreatePocoCommand();\n const string countUsers = \"select count(*) from dispatch_cql3.users limit 1000000\";\n const string insertBatch = \"INSERT INTO dispatch_cql3.users (uid, flow, last_state, test1, test2) VALUES (?, ?, ?, ?, ?)\";\n\n // Count users before\n var resCount = cmd.Execute<CountSchema>(countUsers).Result;\n long countBefore = resCount.First().Count;\n\n // Add users\n st.Start();\n var preparedInsert = cmd.Prepare(insertBatch);\n List<Task> tasks = new List<Task>();\n\n\n for (int i = 0; i < num; i++)\n {\n var res = preparedInsert.Execute(new\n {\n uid = ids[rnd.Next(ids.Length)].ToString(),\n flow = flows[rnd.Next(flows.Length)],\n last_state = \"/random\",\n test1 = chars[rnd.Next(chars.Length)].ToString(),\n test2 = chars[rnd.Next(chars.Length)].ToString()\n },\n ConsistencyLevel.QUORUM);//.ContinueWith(_ => Interlocked.Increment(ref count));\n tasks.Add(res);\n\n //res.Wait();\n }\n\n Task.WaitAll(tasks.ToArray(), 10);\n st.Stop();\n\n // Count users after\n resCount = cmd.Execute<CountSchema>(countUsers).Result;\n long countAfter = resCount.First().Count;\n\n count = (int)(countAfter - countBefore);\n }\n\n ClusterManager.Shutdown();\n return new Tuple<int, long>(count, st.ElapsedMilliseconds);\n }\n\n private IEnumerable<Guid> GenerateIds(int num)\n {\n for (int i = 0; i < num; i++)\n {\n yield return Guid.NewGuid();\n }\n }\n\n private static IEnumerable<string> GetAllFlows()\n {\n List<string> flows = new List<string>();\n using (ICluster cluster = ClusterManager.GetCluster(\"TestCassandra\"))\n {\n var cmd = cluster.CreatePropertyBagCommand();\n const string cql = \"SELECT flow FROM dispatch_cql3.flows\";\n var resUsers = cmd.Execute<IDictionary<string, object>>(cql).ContinueWith(res =>\n {\n foreach (var item in res.Result)\n {\n foreach (var dic in item)\n {\n flows.Add(dic.Value.ToString());\n }\n }\n });\n\n resUsers.Wait();\n }\n\n return flows;\n }\n }\n}\n"
},
{
"alpha_fraction": 0.4804285168647766,
"alphanum_fraction": 0.4845488369464874,
"avg_line_length": 30.9342098236084,
"blob_id": "345bf3c37fa45b7515b362b36614eb4fa9ec21f2",
"content_id": "4e36d370adf3750907a16a249f9ff7ffb6631193",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "C#",
"length_bytes": 2429,
"license_type": "no_license",
"max_line_length": 343,
"num_lines": 76,
"path": "/ProtoStudy/Dal/MockedUserDao.cs",
"repo_name": "arkhipus/RnD",
"src_encoding": "UTF-8",
"text": "using System;\nusing System.Collections.Generic;\nusing System.Linq;\nusing System.Text;\nusing Models;\n\nnamespace Dal\n{\n public class MockedUserDao : IDao\n {\n private static string[] metaKeys = { \"name\", \"dob\", \"dod\", \"surname\", \"pob\", \"pod\", \"address\", \"ssn\", \"employer\", \"citizenship\" };\n private static string[] names = { \"Rocio Radtke\", \"Lorretta Larock\", \"Annetta Arendt\", \"Arica Arendt\", \"Gayla Garris\", \"Betty Brathwaite\", \"Sharell Spadaro\", \"Marjorie Michel\", \"Corinna Carleton\", \"Easter Eggleston\", \"Carina Crick\", \"Isaiah Iwamoto\", \"Bella Bullington\", \"Bea Bento\", \"Romelia Rhett\", \"Michele Mulvey\", \"Kathlene Kinlaw\", \"Ida Imai\", \"Merrie Morin\", \"Shanta Saulsberry\", \"Thora Terhaar\", \"Lee Lemelle\", \"Antonio Augsburger\", \"Lilliana Looby\", \"Mafalda Mccorvey\", \"Booker Bejarano\", \"Bernita Buis\", \"Marleen Madia\", \"Eugene Exley\", \"Bari Brant\" };\n private static IList<User> usersCache;\n\n private static Random rnd;\n\n static MockedUserDao()\n {\n rnd = new Random();\n usersCache = new List<User>();\n for (int i = 1; i <= 100; i++)\n {\n usersCache.Add(new User(i, names[rnd.Next(names.Length)], rnd.Next() % 2 == 0, GenerateMetaData()));\n }\n }\n\n public MockedUserDao()\n {\n if (usersCache == null || usersCache.Count == 0)\n {\n throw new InvalidProgramException(\"Invalid initialization.\");\n }\n }\n\n public User GetUser(int id)\n {\n return usersCache.SingleOrDefault(u => u.Id == id);\n }\n\n public IEnumerable<User> GetUsers()\n {\n return usersCache;\n }\n\n public int AddUser(string name, IDictionary<string, string> meta)\n {\n int i = usersCache.Max<User>(u => u.Id);\n usersCache.Add(new User(i++, name, false, meta));\n return i;\n }\n\n public void ActivateUser(int id)\n {\n usersCache.Single(u => u.Id == id).Active = true;\n }\n\n public static IDictionary<string, string> GenerateMetaData()\n {\n Dictionary<string, string> retVal = new Dictionary<string, string>();\n int max = metaKeys.Length;\n int count = rnd.Next(3, max);\n for (int i = 0; i < count; i++)\n {\n string key;\n do\n {\n key = metaKeys[rnd.Next(0, max)];\n } while (retVal.ContainsKey(key));\n\n retVal.Add(key, Guid.NewGuid().ToString());\n }\n\n return retVal;\n }\n }\n}\n"
},
{
"alpha_fraction": 0.6147540807723999,
"alphanum_fraction": 0.6147540807723999,
"avg_line_length": 14.25,
"blob_id": "46af16d0a31003258f658836ebf3e42979f56c06",
"content_id": "3f0e0c77ad37c8a14c730596167a26489b8c729f",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "C#",
"length_bytes": 124,
"license_type": "no_license",
"max_line_length": 39,
"num_lines": 8,
"path": "/CassandraStudy/CassandraStudy/Schemas/CountSchema.cs",
"repo_name": "arkhipus/RnD",
"src_encoding": "UTF-8",
"text": "\nnamespace CassandraStudy.Schemas\n{\n internal class CountSchema\n {\n public long Count { get; set; }\n }\n}\n"
},
{
"alpha_fraction": 0.6179999709129333,
"alphanum_fraction": 0.6349999904632568,
"avg_line_length": 17.18181800842285,
"blob_id": "ae0d8f693ad4829013d81bdfdd1a3ccbd9cc9972",
"content_id": "57849008e91e8e631caf9bad30734d6098c5c134",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 1000,
"license_type": "no_license",
"max_line_length": 82,
"num_lines": 55,
"path": "/ProtoStudy/OSX-Python/client.py",
"repo_name": "arkhipus/RnD",
"src_encoding": "UTF-8",
"text": "#!/usr/bin/python\n\n\"\"\"\nProtobuf via 0MQ POC\n\"\"\"\n\nimport sys\nimport user_pb2\nimport zmq\n\nhost = \"tcp://aarkhipov00:5555\"\n\ndef main():\n\tcontext = zmq.Context()\n\tsocket = context.socket(zmq.REQ)\n\tsocket.connect(host)\n\n\twhile True:\n\n\t\t## Get user id from user\n\t\ttry:\n\t\t\tuid = int(raw_input(\"\\nUser ID: \"))\n\t\texcept ValueError:\n\t\t\tprint(\"Oops! ID must be an int.\")\n\t\t\tcontinue\n\n\t\t## Create request, serialize and send\n\t\tureq = user_pb2.UserRequest()\n\t\tureq.UserId = uid\n\t\tsocket.send (ureq.SerializeToString())\n\n\t\t## Receive proto user\n\t\tresponse = socket.recv()\n\t\tif(len(response) == 0):\n\t\t\tprint(\"User not found.\")\n\t\t\tcontinue\n\n\t\t## Deserialize user\n\t\tuser = user_pb2.User()\n\t\tuser.ParseFromString(response)\n\n\t\t## Print results\n\t\tprint('ID: {0}, Name: {1}, Active: {2}'.format(user.Id, user.Name ,user.Active))\n\t\tprint('Meta:'),\n\t\tprintMeta(user.Metadata)\n\ndef printMeta(kv):\n\tif(len(kv) == 0):\n\t\treturn\n\n\tfor x in kv:\n\t\tprint('\\t{0}: {1}'.format(x.Key, x.Value))\n\nif __name__ == '__main__':\n main()\n"
},
{
"alpha_fraction": 0.6592292189598083,
"alphanum_fraction": 0.6592292189598083,
"avg_line_length": 20.434782028198242,
"blob_id": "deb2ec28d648469ef312ec41be08fe5c96356ead",
"content_id": "201e4ba9548d53078d5fb8e825f0862435299b6a",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "C#",
"length_bytes": 495,
"license_type": "no_license",
"max_line_length": 52,
"num_lines": 23,
"path": "/CassandraStudy/CassandraStudy/Schemas/ColumnSchema.cs",
"repo_name": "arkhipus/RnD",
"src_encoding": "UTF-8",
"text": "using System;\nusing System.Collections.Generic;\nusing System.Linq;\nusing System.Text;\nusing System.Threading.Tasks;\n\nnamespace CassandraStudy.Schemas\n{\n internal class ColumnSchema\n {\n public string KeyspaceName { get; set; }\n\n public string ColumnFamilyName { get; set; }\n\n public string ColumnName { get; set; }\n\n public int ComponentIndex { get; set; }\n\n public string Validator { get; set; }\n\n public string IndexName { get; set; }\n }\n}\n"
},
{
"alpha_fraction": 0.5502709150314331,
"alphanum_fraction": 0.5752558708190918,
"avg_line_length": 37.18390655517578,
"blob_id": "f9604f810425460fb293f2e6a507d15314fb67d7",
"content_id": "c250f1106cda4c1c86419a137f07cee23676f2a2",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "C#",
"length_bytes": 3324,
"license_type": "no_license",
"max_line_length": 161,
"num_lines": 87,
"path": "/CassandraStudy/CassandraStudy/Program.cs",
"repo_name": "arkhipus/RnD",
"src_encoding": "UTF-8",
"text": "using System;\nusing System.Collections.Generic;\nusing System.Dynamic;\nusing System.Linq;\nusing System.Text;\nusing System.Threading;\nusing System.Threading.Tasks;\nusing CassandraSharp;\nusing CassandraSharp.CQL;\nusing CassandraSharp.Config;\nusing CassandraSharp.CQLPoco;\nusing CassandraSharp.CQLPropertyBag;\nusing CassandraSharp.Extensibility;\nusing System.Diagnostics;\nusing CassandraStudy.Schemas;\n\n\nnamespace CassandraStudy\n{\n class Program\n {\n static void Main(string[] args)\n {\n Dal dal = new Dal();\n Stopwatch st = new Stopwatch();\n\n //// Generate users\n Tuple<int, long> done = dal.GenerateUsers(100000);\n Console.WriteLine(\"Created {0} users in {1} ms.\", done.Item1, done.Item2);\n\n //// Reading Users\n //st.Restart();\n //var users = dal.GetUsers(\"\", 10);\n //st.Stop();\n //int count = users.Count();\n //DisplayResult(users);\n //Console.WriteLine(\"Read {0} users in {1} ms.\", count, st.ElapsedMilliseconds);\n\n\n //XmlConfigurator.Configure();\n //using (ICluster cluster = ClusterManager.GetCluster(\"TestCassandra\"))\n //{\n //// //Get all columns\n //// //var cmd = cluster.CreatePocoCommand();\n //// //const string cqlKeyspaces = \"SELECT * from system.schema_columns where keyspace_name = 'dispatch_cql3' AND columnfamily_name = 'users'\";\n //// //var resTask = cmd.Execute<SchemaColumns>(cqlKeyspaces).ContinueWith(res => DisplayResult(res.Result));\n //// //resTask.Wait();\n\n\n // // var cmd = cluster.CreatePropertyBagCommand();\n // // const string cqlUsers = \"SELECT * FROM dispatch_cql3.users\";// WHERE uid = '05f7200f-d000-0000-0000-000000000000' AND flow = 'Merlin_1'\";\n // // var resUsers = cmd.Execute<IDictionary<string, object>>(cqlUsers).ContinueWith(res => DisplayResult(res.Result));\n // // resUsers.Wait();\n\n // var cmd = cluster.CreatePropertyBagCommand();\n // const string cql = \"SELECT flow FROM dispatch_cql3.flows\";// WHERE uid = '05f7200f-d000-0000-0000-000000000000' AND flow = 'Merlin_1'\";\n // var resUsers = cmd.Execute<IDictionary<string, object>>(cql).ContinueWith(res => DisplayResult(res.Result));\n // resUsers.Wait();\n //}\n\n //ClusterManager.Shutdown();\n Console.ReadLine();\n }\n\n private static void DisplayResult(IEnumerable<IDictionary<string, object>> req)\n {\n foreach (var row in req)\n {\n foreach (var col in row)\n {\n Console.Write(\"{0}:'{1}' \", col.Key, col.Value); \n }\n\n Console.WriteLine();\n }\n }\n\n private static void DisplayResult(IEnumerable<ColumnSchema> req)\n {\n foreach (ColumnSchema schemaColumns in req)\n {\n Console.WriteLine(\"KeyspaceName={0} ColumnFamilyName={1} ColumnName={2} IndexName={3}\",\n schemaColumns.KeyspaceName, schemaColumns.ColumnFamilyName, schemaColumns.ColumnName, schemaColumns.IndexName);\n }\n }\n }\n}\n"
},
{
"alpha_fraction": 0.597835123538971,
"alphanum_fraction": 0.6044962406158447,
"avg_line_length": 19.70689582824707,
"blob_id": "78fd9964ba6d2e1c8cebcd5bc0c40e1480d7decf",
"content_id": "15d65072f8899d06827bebac1b6f0af81b74eef6",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Markdown",
"length_bytes": 1201,
"license_type": "no_license",
"max_line_length": 71,
"num_lines": 58,
"path": "/README.md",
"repo_name": "arkhipus/RnD",
"src_encoding": "UTF-8",
"text": "Some random stuff. Mostly R&D projects. Forked from acom-alex.\n\n# Mermain Demo\nDocs: http://mermaid-js.github.io/mermaid/ \n\n\n## Graph test\n```mermaid\ngraph TD\n A--GET-->B\n A--GET-->C\n C-.PUT.->D\n B-->D[(Database)]\n```\n\n## Graph with links\n```mermaid\nflowchart LR;\n A-->B;\n B-->C;\n C-->D;\n click A callback \"Tooltip for a callback\"\n click B \"http://www.github.com\" \"This is a tooltip for a link\"\n click A call callback() \"Tooltip for a callback\"\n click B href \"http://www.github.com\" \"This is a tooltip for a link\"\n```\n\n## Sequence Diagram\n```mermaid\nsequenceDiagram\n participant dotcom\n participant iframe\n participant viewscreen\n dotcom->>iframe: loads html w/ iframe url\n iframe->>viewscreen: request template\n viewscreen->>iframe: html & javascript\n iframe->>dotcom: iframe ready\n dotcom->>iframe: set mermaid data on iframe\n iframe->>iframe: render mermaid\n```\n## Nested graphs\n\n```mermaid\nflowchart LR\n subgraph TOP\n direction TB\n subgraph B1\n direction RL\n i1 -->f1\n end\n subgraph B2\n direction BT\n i2 -->f2\n end\n end\n A --> TOP --> B\n B1 --> B2\n````\n"
},
{
"alpha_fraction": 0.6656534671783447,
"alphanum_fraction": 0.6656534671783447,
"avg_line_length": 19.5625,
"blob_id": "cf72b9a5154f0efc21fb166a15e1bbc6b82f0ce8",
"content_id": "e22edd00aec4ec36ce63e0b6318f5bf299f75aa3",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "C#",
"length_bytes": 331,
"license_type": "no_license",
"max_line_length": 67,
"num_lines": 16,
"path": "/ProtoStudy/Dal/IDao.cs",
"repo_name": "arkhipus/RnD",
"src_encoding": "UTF-8",
"text": "using System;\nusing System.Collections.Generic;\nusing System.Linq;\nusing System.Text;\nusing Models;\n\nnamespace Dal\n{\n public interface IDao\n {\n int AddUser(string name, IDictionary<string, string> meta);\n User GetUser(int id);\n IEnumerable<User> GetUsers();\n void ActivateUser(int id);\n }\n}\n"
},
{
"alpha_fraction": 0.35274651646614075,
"alphanum_fraction": 0.35704830288887024,
"avg_line_length": 31.84782600402832,
"blob_id": "fb514ed0c7850b6b02e490d836ae1a12b14a0132",
"content_id": "fec2a11a40b73718a59482e7fe36731b81476f88",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "C#",
"length_bytes": 3024,
"license_type": "no_license",
"max_line_length": 118,
"num_lines": 92,
"path": "/ProtoStudy/ZClient/Program.cs",
"repo_name": "arkhipus/RnD",
"src_encoding": "UTF-8",
"text": "using System;\nusing System.Collections.Generic;\nusing System.IO;\nusing System.Linq;\nusing System.Text;\nusing System.Threading.Tasks;\nusing Models;\nusing ZMQ;\n\nnamespace ZClient\n{\n class Program\n {\n static void Main(string[] args)\n {\n Console.WriteLine(\"*** ZMQ Client ***\");\n\n using (var context = new Context(1))\n {\n using (Socket requester = context.Socket(SocketType.REQ))\n {\n requester.Connect(\"tcp://localhost:5555\");\n\n while (true)\n {\n Console.Write(\"User ID: \");\n string input = Console.ReadLine();\n\n if (string.IsNullOrEmpty(input))\n {\n break;\n }\n\n int uid;\n if (int.TryParse(input, out uid))\n {\n // Send request.\n byte[] req;\n using (MemoryStream ms = new MemoryStream())\n {\n ProtoBuf.Serializer.Serialize<UserRequest>(ms, new UserRequest(uid));\n req = ms.ToArray();\n }\n requester.Send(req);\n\n // Get response.\n byte[] response = requester.Recv(); \n\n if (response.Length > 0)\n {\n User user;\n using (MemoryStream ms = new MemoryStream(response))\n {\n user = ProtoBuf.Serializer.Deserialize<User>(ms);\n }\n\n Console.WriteLine(\"Id: {0}, Name: {1}, Active: {2}\", user.Id, user.Name, user.Active);\n Console.WriteLine(\"Meta: {0}\", PrintMeta(user.Metadata));\n }\n else\n {\n Console.WriteLine(\"User not found.\");\n }\n }\n else\n {\n Console.WriteLine(\"Invalid Id. Must be an int.\");\n }\n\n Console.WriteLine();\n }\n }\n }\n }\n\n private static string PrintMeta(IDictionary<string, string> meta)\n {\n if (meta == null || meta.Count == 0)\n {\n return string.Empty;\n }\n\n StringBuilder sb = new StringBuilder();\n foreach (var item in meta)\n {\n sb.AppendFormat(\"\\t{0}: {1}\\n\", item.Key, item.Value);\n }\n\n return sb.ToString();\n }\n }\n}\n"
},
{
"alpha_fraction": 0.3478064239025116,
"alphanum_fraction": 0.35097241401672363,
"avg_line_length": 31.04347801208496,
"blob_id": "9e99b0037bf9797bb2360f1c1a2a6c62a4aff3f2",
"content_id": "04a261171403e69ec49206e6a1b0da0ca72e1ed3",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "C#",
"length_bytes": 2213,
"license_type": "no_license",
"max_line_length": 83,
"num_lines": 69,
"path": "/ProtoStudy/ZMQ/Program.cs",
"repo_name": "arkhipus/RnD",
"src_encoding": "UTF-8",
"text": "using System;\nusing System.Collections.Generic;\nusing System.IO;\nusing System.Linq;\nusing System.Text;\nusing System.Threading.Tasks;\nusing Dal;\nusing Models;\nusing ProtoBuf;\nusing ZMQ;\n\nnamespace ZMQ\n{\n class Program\n {\n static void Main(string[] args)\n {\n Console.WriteLine(\"*** ZMQ Server ***\");\n\n using (var context = new Context(1))\n {\n using (Socket responder = context.Socket(SocketType.REP))\n {\n responder.Bind(\"tcp://*:5555\");\n\n IDao dal = new MockedUserDao();\n\n string proto = Serializer.GetProto<UserRequest>();\n\n while (true)\n {\n try\n {\n var request = responder.Recv();\n UserRequest uid;\n using (MemoryStream ms = new MemoryStream(request))\n {\n uid = Serializer.Deserialize<UserRequest>(ms);\n }\n\n Console.Write(\"Received request for {0}.\", uid.UserId);\n\n User u = dal.GetUser(uid.UserId);\n if (u != null)\n {\n Console.WriteLine(\" Found.\");\n using (MemoryStream ms = new MemoryStream())\n {\n Serializer.Serialize<User>(ms, u);\n responder.Send(ms.ToArray());\n }\n }\n else\n {\n Console.WriteLine(\" Not found.\");\n responder.Send();\n }\n }\n catch (System.Exception ex)\n {\n Console.WriteLine(\"Error: {0}\", ex.Message);\n break;\n }\n }\n }\n }\n }\n }\n}\n"
},
{
"alpha_fraction": 0.6062102913856506,
"alphanum_fraction": 0.6672547459602356,
"avg_line_length": 23.86842155456543,
"blob_id": "92b6b331631922c6efe7875fbae3842c326d1fea",
"content_id": "b6c0068626294e82e03f9c150fe31234b575ebb5",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 2834,
"license_type": "no_license",
"max_line_length": 166,
"num_lines": 114,
"path": "/CachingComp/comp.py",
"repo_name": "arkhipus/RnD",
"src_encoding": "UTF-8",
"text": "#!/usr/bin/env python\n\n\"\"\"\nPerformance tesing of four different caching solutions.\nTo run: ./comp.py [iterations]\n\nhttps://github.com/andymccurdy/redis-py\nhttps://github.com/linsomniac/python-memcached\nhttp://amix.dk/blog/post/19367\n\"\"\"\n\nimport sys\nimport memcache\nimport redis\nfrom hash_ring import HashRing\nimport time\n\nmodes = ['redis_proxy', 'memcached_proxy', 'sharded_redis', 'sharded_memcached']\n\ncluster = {\n\t'redis_proxy':'10.129.1.48:6379',\n\t'memcached_proxy':'10.129.1.48:11211',\n\t'sharded_redis':['sfredis00:6381','sfredis00:6382','sfredis00:6383','sfredis00:6384','sfredis01:6381','sfredis01:6382','sfredis01:6383','sfredis01:6384'],\n\t'sharded_memcached':['sfredis00:11211','sfredis00:11212','sfredis00:11213','sfredis00:11214','sfredis01:11211','sfredis01:11212','sfredis01:11213','sfredis01:11214']\n}\n\nring = None\n\t\n# Memcached servers hash ring\n\ndef GetClient(key, mode):\n\tif mode.startswith('sharded'):\n\t\tserver = ring.get_node(key)\n\telse:\n\t\tserver = cluster[mode]\n\t\n\tif mode.find('memcached') > -1:\n\t\treturn memcache.Client([server], debug=0)\n\telse:\n\t\tparts = server.split(':')\n\t\treturn redis.Redis(host = parts[0], port = int(parts[1]))\n\ndef CreateKey(x):\n\treturn 'my_caching_key_{0}'.format(x)\n\ndef SetCache(mode, key, value):\n\tclient = GetClient(key, mode)\n\treturn client.set(key, value)\n\ndef GetCache(mode, key):\n\tclient = GetClient(key, mode)\n\treturn client.get(key)\n\ndef RemoveCache(mode, key):\n\tclient = GetClient(key, mode)\n\treturn client.delete(key)\n\ndef ConfigureHash(mode):\n\tglobal ring\n\tring = HashRing(cluster[mode]\n\ndef main():\n\thits = 0\n\tmisses = 0\n\tresults = []\n\n\tif len(sys.argv) > 1:\n\t\tcount = int(sys.argv[1])\n\telse:\n\t\tcount = 10\n\n\tfor mode in modes:\n\t\tConfigureHash(mode)\n\t\twrites = 0.0\n\t\treads = 0.0\n\n\t\tprint 'Writing using', mode\n\t\twstart = time.time()\n\t\tfor x in xrange(0,count):\n\t\t\tkey = CreateKey(x)\n\t\t\t#print \"{0} W {1}:{2}\".format(mode, key, x)\n\t\t\tretval = SetCache(mode, key, 'my_caching_value_%s' % x)\n\t\t\tif retval == 0 or retval is None:\n\t\t\t\traise Exception('Error adding value.')\n\t\twrites = time.time() - wstart\n\n\t\tprint 'Reading using', mode\n\t\trstart = time.time()\n\t\tfor x in xrange(0,count):\n\t\t\tkey = CreateKey(x)\n\t\t\tretval = GetCache(mode, key)\n\t\t\t#print \"{0} R {1}:{2}\".format(mode, key, retval)\n\t\t\tif retval != None:\n\t\t\t\thits += 1\n\t\t\telse:\n\t\t\t\tmisses += 1\n\t\treads = time.time() - rstart\n\n\t\tprint 'Cleaning up after', mode\n\t\tfor x in xrange(0,count):\n\t\t\tkey = CreateKey(x)\n\t\t\tretval = RemoveCache(mode, key)\n\t\t\tif retval != 1:\n\t\t\t\traise Exception('Error deleting value.')\n\n\t\tratio = hits / (hits + misses)\n\t\tresults.append([mode, writes, reads, ratio])\n\t\n\tprint \"\\n%s iterations of each:\" % count\n\tfor result in results:\n\t\tprint \"{0} writes: {1:.2f} sec, reads: {2:.2f} sec, hit ratio: {3:.2%}\".format(result[0], result[1], result[2], result[3])\n\nif __name__ == \"__main__\":\n\tmain()"
},
{
"alpha_fraction": 0.7158671617507935,
"alphanum_fraction": 0.7158671617507935,
"avg_line_length": 27.034482955932617,
"blob_id": "01b8af2f91ff201fbe3218a8934a09df3a90f765",
"content_id": "c0af4deb6312965d26b57b24151d8af71b4444c2",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "C#",
"length_bytes": 815,
"license_type": "no_license",
"max_line_length": 86,
"num_lines": 29,
"path": "/CassandraStudy/CassandraStudy/IDal.cs",
"repo_name": "arkhipus/RnD",
"src_encoding": "UTF-8",
"text": "using System;\nusing System.Collections.Generic;\nusing System.Linq;\nusing System.Text;\nusing System.Threading.Tasks;\n\nnamespace CassandraStudy\n{\n internal interface IDal\n {\n IEnumerable<IDictionary<string, object>> GetUsers(string startKey, int slice);\n\n IEnumerable<IDictionary<string, object>> GetUser(string uid);\n\n IDictionary<string, object> GetUserInFlow(string uid, string flow);\n\n string GetLastStateForUser(string uid, string flow);\n\n IEnumerable<IDictionary<string, object>> GetFlows(string startKey, int slice);\n\n IDictionary<string, object> GetFlow(string flow);\n\n int UpdateLastStateForUser(string uid, string lastState);\n\n int AddUser(IDictionary<string, object> user);\n\n int UpdateUser(IDictionary<string, object> user);\n }\n}\n"
}
] | 15 |
lyogavin/pytorch-ssd
|
https://github.com/lyogavin/pytorch-ssd
|
cb03afe414bbd81db7b8802402f75e79cb22c35a
|
a51f1e3f0301db8618669f0d3b6125213c7240e7
|
945b761b6bec8713ee6216f2305529f2b6be6fd4
|
refs/heads/master
| 2022-09-22T07:04:37.927015 | 2020-06-05T05:05:18 | 2020-06-05T05:05:18 | 258,105,945 | 0 | 0 | null | 2020-04-23T05:43:37 | 2020-04-23T03:29:47 | 2020-04-05T22:33:17 | null |
[
{
"alpha_fraction": 0.5408052206039429,
"alphanum_fraction": 0.5494015216827393,
"avg_line_length": 34.338462829589844,
"blob_id": "2dd1d3b8d5e2e6035d2f3a7f0b675a65c8aa4219",
"content_id": "fd454ac7e7de49a4bda64c8db5e4c7e6dcce90cb",
"detected_licenses": [
"MIT"
],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 9190,
"license_type": "permissive",
"max_line_length": 122,
"num_lines": 260,
"path": "/vision/datasets/ava.py",
"repo_name": "lyogavin/pytorch-ssd",
"src_encoding": "UTF-8",
"text": "import numpy as np\nimport pathlib\nimport cv2\nimport os\nimport pandas as pd\nimport copy\nimport json\n\nFPS = 3\n\nDEBUG = False\n\n\ndef sec_to_frame(sec):\n \"\"\"\n Convert time index (in second) to frame index.\n 0: 900\n 30: 901\n \"\"\"\n return (sec - 900) * FPS\n\n\nNUM_FRAMES = 3\nSAMPLE_RATE = FPS // NUM_FRAMES\n\ndef get_sequence(center_idx, half_len, sample_rate, num_frames):\n \"\"\"\n Sample frames among the corresponding clip.\n Args:\n center_idx (int): center frame idx for current clip\n half_len (int): half of the clip length\n sample_rate (int): sampling rate for sampling frames inside of the clip\n num_frames (int): number of expected sampled frames\n Returns:\n seq (list): list of indexes of sampled frames in this clip.\n \"\"\"\n seq = list(range(center_idx - half_len, center_idx + half_len, sample_rate))\n\n for seq_idx in range(len(seq)):\n if seq[seq_idx] < 0:\n seq[seq_idx] = 0\n elif seq[seq_idx] >= num_frames:\n seq[seq_idx] = num_frames - 1\n return seq\n\nclass AVADataset:\n\n def __init__(self, root,\n transform=None, target_transform=None,\n dataset_type=\"train\", balance_data=False,\n single_frame_sec=False,\n return_image_id=False):\n self.root = pathlib.Path(root)\n self.transform = transform\n self.target_transform = target_transform\n self.dataset_type = dataset_type.lower()\n\n self.single_frame_sec = single_frame_sec\n self.return_image_id = return_image_id\n\n\n self.data, self.class_names, self.class_dict = self._read_data()\n self.balance_data = balance_data\n self.min_image_num = -1\n if self.balance_data:\n self.data = self._balance_data()\n self.ids = [info['image_id'] for info in self.data]\n\n self.class_stat = None\n\n def _getitem(self, index):\n image_info = self.data[index]\n image = self._read_image(image_info['image_id'])\n height, width, _ = image.shape\n #print('shapae to set hw', image.shape)\n # duplicate boxes to prevent corruption of dataset\n boxes = copy.copy(image_info['boxes'])\n boxes[:, 0] *= image.shape[1]\n boxes[:, 1] *= image.shape[0]\n boxes[:, 2] *= image.shape[1]\n boxes[:, 3] *= image.shape[0]\n # duplicate labels to prevent corruption of dataset\n labels = copy.copy(image_info['labels'])\n if self.return_image_id and self.transform:\n image = self.transform(image)\n boxes = None\n lables = None\n elif self.transform:\n image, boxes, labels = self.transform(image, boxes, labels)\n if self.target_transform:\n boxes, labels = self.target_transform(boxes, labels)\n\n if self.return_image_id:\n return image_info['image_id'], image, boxes, labels, height, width\n else:\n return image_info['image_id'], image, boxes, labels\n\n def __getitem__(self, index):\n if self.return_image_id:\n image_id, image, boxes, labels, height, width = self._getitem(index)\n return image_id, image, height, width\n else:\n image_id, image, boxes, labels = self._getitem(index)\n return image, boxes, labels\n\n def get_annotation(self, index):\n \"\"\"To conform the eval_ssd implementation that is based on the VOC dataset.\"\"\"\n\n if self.return_image_id:\n image_id, image, boxes, labels, _, _ = self._getitem(index)\n else:\n image_id, image, boxes, labels = self._getitem(index)\n is_difficult = np.zeros(boxes.shape[0], dtype=np.uint8)\n return image_id, (boxes, labels, is_difficult)\n\n def get_image(self, index):\n image_info = self.data[index]\n image = self._read_image(image_info['image_id'])\n if self.return_image_id:\n if self.transform:\n image = self.transform(image)\n else:\n if self.transform:\n image, _ = self.transform(image)\n return image\n\n def _read_data(self):\n annotation_file = f\"{self.root}/ava_{self.dataset_type}_v2.1.csv\"\n\n print('opening ', annotation_file)\n if DEBUG:\n annotation_file = annotation_file + \".debug\"\n annotations = pd.read_csv(annotation_file,\n names = ['video_id', 'sec_id', \"XMin\", \"YMin\", \"XMax\", \"YMax\", \"class_id\", \"person_id\"])\n\n class_names_dict = dict()\n class_dict = dict()\n\n max_class_id = 0\n\n\n with open(f\"{self.root}/ava_action_list_v2.1_for_activitynet_2018.pbtxt\") as f:\n for line in f:\n\n if \"name:\" in line:\n class_name_start_pos = line.find('\"')\n class_name_end_pos = line.find('\"', class_name_start_pos+1)\n class_name = line[class_name_start_pos + 1: class_name_end_pos]\n\n if \"id:\" in line:\n class_id_start_pos = line.find(':')\n class_id = line[class_id_start_pos + 2:].rstrip()\n class_id = int(class_id)\n\n class_names_dict[class_id] = class_name\n max_class_id = max(max_class_id, class_id)\n class_dict[class_name] = class_id\n\n class_names = []\n\n\n for iii in range(max_class_id + 1):\n if iii in class_names_dict:\n class_names.append(class_names_dict[iii])\n else:\n class_names.append(\"\")\n\n print(class_names)\n\n\n\n\n none_exist_count = 0\n data = []\n for video_id_sec_id, group in annotations.groupby([\"video_id\", \"sec_id\"]):\n video_id, sec_id = video_id_sec_id\n frame = sec_to_frame(sec_id)\n if self.single_frame_sec:\n seq = [frame]\n else:\n seq = get_sequence(frame, NUM_FRAMES // 2, SAMPLE_RATE, FPS * (15 * 60 + 1))\n\n for frame_id in seq:\n image_id = f\"{video_id}_%06d\" % frame_id\n image_file = self.root / f\"{image_id}\"[:-7] / f\"{image_id}.jpg\"\n if not os.path.exists(image_file) or os.stat(image_file).st_size <= 0:\n none_exist_count += 1\n continue\n\n boxes = group.loc[:, [\"XMin\", \"YMin\", \"XMax\", \"YMax\"]].values.astype(np.float32)\n # make labels 64 bits to satisfy the cross_entropy function\n labels = np.array(group[\"class_id\"], dtype='int64')\n data.append({\n 'image_id': image_id,\n 'boxes': boxes,\n 'labels': labels\n })\n\n print('non exist frames count:', none_exist_count)\n return data, class_names, class_dict\n\n def __len__(self):\n return len(self.data)\n\n def __repr__(self):\n if self.class_stat is None:\n self.class_stat = {name: 0 for name in self.class_names[1:]}\n for example in self.data:\n for class_index in example['labels']:\n class_name = self.class_names[class_index]\n self.class_stat[class_name] += 1\n content = [\"Dataset Summary:\"\n f\"Number of Images: {len(self.data)}\",\n f\"Minimum Number of Images for a Class: {self.min_image_num}\",\n \"Label Distribution:\"]\n for class_name, num in self.class_stat.items():\n content.append(f\"\\t{class_name}: {num}\")\n return \"\\n\".join(content)\n\n def _read_image(self, image_id):\n #image_file = self.root / self.dataset_type / f\"{image_id}.jpg\"\n image_file = self.root / f\"{image_id}\"[:-7] / f\"{image_id}.jpg\"\n image = cv2.imread(str(image_file))\n if image is None:\n print('none reading %s' % image_file)\n return None\n\n if image.shape[2] == 1:\n image = cv2.cvtColor(image, cv2.COLOR_GRAY2RGB)\n else:\n image = cv2.cvtColor(image, cv2.COLOR_BGR2RGB)\n return image\n\n def _balance_data(self):\n min_factor = 10\n label_image_indexes = [set() for _ in range(len(self.class_names))]\n for i, image in enumerate(self.data):\n for label_id in image['labels']:\n label_image_indexes[label_id].add(i)\n label_stat = [len(s) for s in label_image_indexes]\n self.min_image_num = min(label_stat[1:])\n sample_image_indexes = set()\n for image_indexes in label_image_indexes[1:]:\n image_indexes = np.array(list(image_indexes))\n sub = np.random.permutation(image_indexes)[:self.min_image_num * min_factor]\n sample_image_indexes.update(sub)\n sample_data = [self.data[i] for i in sample_image_indexes]\n return sample_data\n\n\n\nif __name__ == '__main__':\n print ('testing...')\n from torch.utils.data import DataLoader\n ds = AVADataset(\"/home/pi/ava_dataset/\", dataset_type=\"val\")\n\n #print(list(DataLoader(ds, num_workers=0)))\n \n for a in DataLoader(ds, num_workers=0):\n print([x.shape for x in a])\n\n\n"
},
{
"alpha_fraction": 0.6183263659477234,
"alphanum_fraction": 0.6478606462478638,
"avg_line_length": 34.68918991088867,
"blob_id": "d5f18566392eae41fb1895e1fb1a564b38b93ec2",
"content_id": "a15a828ddac13d9095fbb4c63f6bc3630897c4e8",
"detected_licenses": [
"MIT"
],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 2641,
"license_type": "permissive",
"max_line_length": 111,
"num_lines": 74,
"path": "/plot_prediction_csv.py",
"repo_name": "lyogavin/pytorch-ssd",
"src_encoding": "UTF-8",
"text": "from vision.ssd.vgg_ssd import create_vgg_ssd, create_vgg_ssd_predictor\nfrom vision.ssd.mobilenetv1_ssd import create_mobilenetv1_ssd, create_mobilenetv1_ssd_predictor\nfrom vision.ssd.mobilenetv1_ssd_lite import create_mobilenetv1_ssd_lite, create_mobilenetv1_ssd_lite_predictor\nfrom vision.ssd.squeezenet_ssd_lite import create_squeezenet_ssd_lite, create_squeezenet_ssd_lite_predictor\nfrom vision.ssd.mobilenet_v2_ssd_lite import create_mobilenetv2_ssd_lite, create_mobilenetv2_ssd_lite_predictor\nfrom vision.utils.misc import Timer\nimport cv2\nimport sys\nimport pandas as pd\n\nFPS = 3\ndef sec_to_frame(sec):\n \"\"\"\n Convert time index (in second) to frame index.\n 0: 900\n 30: 901\n \"\"\"\n return (sec - 900) * FPS\n\nif len(sys.argv) < 4:\n print('Usage: python plot_prediction_csv.py <label path> <dataset path> <csv path>')\n sys.exit(0)\nlabel_path = sys.argv[1]\ndataset_path = sys.argv[2]\ncsv_path = sys.argv[3]\n\nclass_names = [name.strip() for name in open(label_path).readlines()]\n\npredictions = pd.read_csv(csv_path,\n names=['video_id', 'sec_id', \"XMin\", \"YMin\", \"XMax\", \"YMax\", \"class_id\", \"score\"])\n\n\nsample = predictions.sample(n=1).iloc[0]\n\nvideo_id = sample['video_id']\nframe_id = sec_to_frame(sample['sec_id'])\nimage_id = f\"{video_id}_%06d\" % frame_id\n\nimage_path = f\"{dataset_path}/{video_id}/{image_id}.jpg\"\n\nprint('img:', image_path)\n\n\nrows = predictions.loc[(predictions['video_id'] == video_id) & (predictions['sec_id'] == sample['sec_id'])]\nprint('rows', rows)\n\norig_image = cv2.imread(image_path)\nimage = cv2.cvtColor(orig_image, cv2.COLOR_BGR2RGB)\n#boxes, labels, probs = predictor.predict(image, 10, 0.4)\nrows = rows.sort_values(\"score\", ascending=False)\nprint('rows', rows)\nrows = rows[:10]\nprint('rows', rows)\nrows = rows[rows['score'] > 0.4]\nprint('rows', rows)\n\nfor inx, row in rows.iterrows():\n#for i in range(boxes.size(0)):\n print('row', row)\n box = [row[ \"XMin\"], row[\"YMin\"], row[\"XMax\"], row[\"YMax\"]] #boxes[i, :]\n print(\"box\", box)\n cv2.rectangle(orig_image, (int(box[0]), int(box[1]), int(box[2]), int(box[3])), (255, 255, 0), 4)\n #label = f\"\"\"{voc_dataset.class_names[labels[i]]}: {probs[i]:.2f}\"\"\"\n label_id = row['class_id']\n label = f\"{class_names[label_id]}: {row['score']:.2f}\"\n cv2.putText(orig_image, label,\n (int(box[0])+ 20, int(box[1]) + 40),\n cv2.FONT_HERSHEY_SIMPLEX,\n 1, # font scale\n (255, 0, 255),\n 2) # line type\npath = \"plot_prediction_csv_output.jpg\"\ncv2.imwrite(path, orig_image)\nprint(f\"Found {len(rows)} objects. The output image is {path}\")\n"
},
{
"alpha_fraction": 0.5252225399017334,
"alphanum_fraction": 0.5400593280792236,
"avg_line_length": 29.636363983154297,
"blob_id": "8eb2a1b17b32b9bf85e13ace14f759489c0e11a1",
"content_id": "0cf86b4862789419f4f91ebec533932aa64d1910",
"detected_licenses": [
"MIT"
],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 337,
"license_type": "permissive",
"max_line_length": 116,
"num_lines": 11,
"path": "/vision/utils/remove_dir_ext.py",
"repo_name": "lyogavin/pytorch-ssd",
"src_encoding": "UTF-8",
"text": "\nimport pathlib\nimport glob\nimport os\n\n\nfor dir in glob.glob(\"./*.*\"):\n if dir.rfind(\".mp4\") == len(dir) - 4 or dir.rfind(\".mkv\") == len(dir) - 4 or dir.rfind(\".webm\") == len(dir) - 5:\n #print(f\"found {dir}\")\n new_dir = dir[0:dir.rfind(\".\")]\n print(f\"renaming {dir} to {new_dir}\")\n #os.rename(dir, new_dir)"
},
{
"alpha_fraction": 0.5522323250770569,
"alphanum_fraction": 0.5618768930435181,
"avg_line_length": 32.31407928466797,
"blob_id": "9571517c99ca219bce28fafefa128b70102e7d13",
"content_id": "e6bdc289a5e7dbc838e724a9f073d55c7456a2ef",
"detected_licenses": [
"MIT"
],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 9228,
"license_type": "permissive",
"max_line_length": 126,
"num_lines": 277,
"path": "/vision/datasets/downloader.py",
"repo_name": "lyogavin/pytorch-ssd",
"src_encoding": "UTF-8",
"text": "import argparse\nimport time\nimport glob\nimport json\nimport os\nimport shutil\nimport subprocess\nimport uuid\nfrom collections import OrderedDict\nimport logging\nfrom joblib import delayed\nfrom joblib import Parallel\nimport pandas as pd\nimport shutil\nimport sys\nfrom pathlib import Path\nimport random\n\nFPS = 3 #30\n\nlogger = logging.getLogger()\nformatter = logging.Formatter(\n '%(process)d-%(asctime)s %(levelname)s: %(message)s '\n '[in %(pathname)s:%(lineno)d]')\n#handler = logging.FileHandler(\"./downloader.log\")\nhandler = logging.StreamHandler(sys.stdout)\n\nhandler.setFormatter(formatter)\nlogger.addHandler(handler)\nlogger.setLevel(logging.INFO)\n\ndef sec_to_frame(sec):\n \"\"\"\n Convert time index (in second) to frame index.\n 0: 900\n 30: 901\n \"\"\"\n return (sec - 900) * FPS\n\ndef create_video_folders(output_dir, tmp_dir):\n \"\"\"Creates a directory for each label name in the dataset.\"\"\"\n\n if not os.path.exists(output_dir):\n os.makedirs(output_dir)\n\n if not os.path.exists(f\"{output_dir}/frames\"):\n os.makedirs(f\"{output_dir}/frames\")\n\n if not os.path.exists(tmp_dir):\n os.makedirs(tmp_dir)\n logger.info(\"created %s\" % tmp_dir)\n\n Path(f\"{output_dir}/donemarkers/test.done\").touch()\n\n\ndef open_annotation(filename):\n anno = pd.read_csv(filename, index_col=0,header=None)\n return anno\n\n\nNUM_FRAMES = 5\nSAMPLE_RATE = FPS // NUM_FRAMES\n\ndef get_sequence(center_idx, half_len, sample_rate, num_frames):\n \"\"\"\n Sample frames among the corresponding clip.\n Args:\n center_idx (int): center frame idx for current clip\n half_len (int): half of the clip length\n sample_rate (int): sampling rate for sampling frames inside of the clip\n num_frames (int): number of expected sampled frames\n Returns:\n seq (list): list of indexes of sampled frames in this clip.\n \"\"\"\n seq = list(range(center_idx - half_len, center_idx + half_len, sample_rate))\n\n for seq_idx in range(len(seq)):\n if seq[seq_idx] < 0:\n seq[seq_idx] = 0\n elif seq[seq_idx] >= num_frames:\n seq[seq_idx] = num_frames - 1\n return seq\n\ndef download_clip_wrapper(line, tmp_dir, output_dir, i, total_count):\n \"\"\"Wrapper for parallel processing purposes.\"\"\"\n try:\n logger = logging.getLogger()\n formatter = logging.Formatter(\n '%(process)d-%(asctime)s %(levelname)s: %(message)s '\n '[in %(pathname)s:%(lineno)d]')\n handler = logging.FileHandler(\"./downloader.log\")\n handler.setFormatter(formatter)\n logger.addHandler(handler)\n logger.setLevel(logging.INFO)\n logger.info(\"processing %d/%d %s...\" % (i, total_count, line))\n anno = open_annotation(f\"{output_dir}/ava_train_v2.1.csv\")\n\n #if os.path.exists(f\"{output_dir}/donemarkers/{line}.done\"):\n # logger.info(f\"{output_dir}/donemarkers/{line}.done already existed, skipping...\")\n # return line\n if line[-5:] == \".webm\":\n video_name = line[:-5]\n else:\n video_name = line[:-4]\n\n # check done by check files:\n\n if os.path.exists(f\"{output_dir}/{video_name}\"):\n extracted_file_list = glob.glob(f\"{output_dir}{video_name}/{video_name}_*.jpg\")\n\n if len(extracted_file_list) >= 2705:\n logger.info(f\"2705 jpg files already found in {output_dir}{video_name}/ already existed, skipping...\")\n return\n\n tmp_dir = \"%s/%s/\" % (tmp_dir, line)\n\n\n\n if not os.path.exists(tmp_dir):\n os.makedirs(tmp_dir)\n\n\n command = ['wget',\n 'https://s3.amazonaws.com/ava-dataset/trainval/%s' % line,\n '-P',\n tmp_dir\n ]\n command = ' '.join(command)\n logger.info(\"running %s\" % command)\n try:\n output = subprocess.check_output(command, shell=True,\n stderr=subprocess.STDOUT)\n except subprocess.CalledProcessError as err:\n logger.info(\"error:%s\"% err)\n return\n\n logger.info(\"%s downloaded.\" % line)\n\n\n\n \"\"\"\n if line[-5:] == \".webm\":\n old_line = line\n line = \"%s.mp4\" % line[:-4]\n \"\"\"\n\n\n video_output_dir = f\"{output_dir}{video_name}/\"\n if not os.path.exists(video_output_dir):\n os.makedirs(video_output_dir)\n\n out_name = f\"{video_output_dir}{video_name}_%06d.jpg\".replace(\" \", \"\\ \")\n\n\n result = subprocess.run(['sync', \"%s/%s\" % (tmp_dir, line)], stdout=subprocess.PIPE)\n\n \"\"\"\n for ii in range(5):\n time.sleep(1)\n result = subprocess.run(['ls', '-l', \"%s/%s\" % (tmp_dir, line)], stdout=subprocess.PIPE)\n logger.info(\"ls output: %s\" % result.stdout.decode('utf-8'))\n \"\"\"\n\n #command = ['ffmpeg', '-ss', '900', '-t', '901', '-i', \"%s/%s\" % (tmp_dir, old_line), \"%s/15min_%s\" % (tmp_dir, line)\n command = ['ffmpeg', '-threads', '1', '-loglevel', 'info','-ss', '900', '-t', '901', '-i', \"%s%s\" % (tmp_dir, line),\n '-r', '%d' % FPS, '-y', '-q:v', \"1\", out_name ]\n #command = ['ffmpeg', '-i', \"%s/15min_%s\" % (tmp_dir, line), '-r', '30', '-q:v', \"1\", out_name\n command = ' '.join(command)\n logger.info(\"running %s\" % command)\n ffoutput = \"\"\n for ii in range(5):\n try:\n ffoutput = subprocess.check_output(command, shell=True,\n stderr=subprocess.STDOUT)\n #logger.info(\"output: %s\" % ffoutput)\n break\n except subprocess.CalledProcessError as err:\n logger.info(\"error:%s\"% err)\n logger.info(\"output:%s\"% ffoutput)\n\n \"\"\"\n extracted_file_list = glob.glob(f\"{tmp_dir}/{video_name}_*.jpg\")\n\n #logger.info(\"%s cut.\" % line)\n logger.info(\"%d extracted to frames of %s.\" % (len(extracted_file_list), out_name))\n\n\n command = ['ffmpeg', '-i', \"%s/15min_%s\" % (tmp_dir, line), '-r', '30', '-q:v', \"1\", out_name\n ]\n command = ' '.join(command)\n try:\n output = subprocess.check_output(command, shell=True,\n stderr=subprocess.STDOUT)\n except subprocess.CalledProcessError as err:\n logger.info(\"error:\", err)\n return\n\n logger.info(\"%s extracted to frames of %s.\" % (line, out_name))\n\n saved_frame_count = 0\n for sec in anno.loc[video_name][1].unique():\n frame = sec_to_frame(sec)\n seq = get_sequence(frame, NUM_FRAMES // 2, SAMPLE_RATE, len(extracted_file_list))\n\n saved_frame_count += len(seq)\n #logger.info(\"trying to move %d files: %s\" % (len(seq), str(seq)))\n\n for frame_id in seq:\n video_file_name = f\"{video_name}_%06d.jpg\" % frame_id\n try:\n if os.path.exists(out_name % frame_id):\n shutil.move(out_name % frame_id, f\"{output_dir}/frames/{video_file_name}\")\n except:\n logger.info(\"error moving: %s\"% sys.exc_info()[0])\n\n\n logger.info(\"%s saved %d frames.\" % (line, saved_frame_count))\n \"\"\"\n\n #os.remove(tmp_dir)\n shutil.rmtree(tmp_dir, ignore_errors=True)\n\n logger.info(\"temp dir %s removed\" % tmp_dir)\n\n Path(f\"{output_dir}/donemarkers/{line}.done\").touch()\n logger.info(f\"{video_output_dir} done\")\n return line\n except Exception as e:\n logger.info(\"err %s\"% e)\n return line\n except:\n logger.info(\"Unexpected error: %s\" % sys.exc_info()[0])\n return line\n\n\n\n\ndef main(input_csv, output_dir,num_jobs=24, tmp_dir='/tmp/ava_data'):\n\n\n # Creates folders where videos will be saved later.\n label_to_dir = create_video_folders(output_dir, tmp_dir)\n\n with open(f\"{output_dir}/ava_file_names_trainval_v2.1.txt\") as f:\n lst = [line for line in f]\n\n random.shuffle(lst)\n\n\n\n # Download all clips.\n if num_jobs == 1:\n status_lst = []\n for i, line in enumerate(lst):\n status_lst.append(download_clip_wrapper(line.strip(), tmp_dir, output_dir, i, 299))\n else:\n status_lst = Parallel(n_jobs=num_jobs)(delayed(download_clip_wrapper)(\n line.strip(), tmp_dir, output_dir, i, 299) for i, line in enumerate(f))\n\n\n\n # Save download report.\n with open('download_report.json', 'w') as fobj:\n fobj.write(json.dumps(status_lst))\n\n\nif __name__ == '__main__':\n description = 'Helper script for downloading and trimming kinetics videos.'\n p = argparse.ArgumentParser(description=description)\n p.add_argument('input_csv', type=str,\n help=('CSV file containing the following format: '\n 'YouTube Identifier,Start time,End time,Class label'))\n p.add_argument('output_dir', type=str,\n help='Output directory where videos will be saved.')\n p.add_argument('-n', '--num-jobs', type=int, default=24)\n p.add_argument('-t', '--tmp-dir', type=str, default='/tmp/ava_data')\n main(**vars(p.parse_args()))\n"
}
] | 4 |
oulgen/CudaPy
|
https://github.com/oulgen/CudaPy
|
f71a92af704c900f50338d16bc783dc48628990c
|
bee43c568929dedcd2182cab21aea0562c5fc890
|
b85dd870690f16074b18f927ccb8f128bc041f62
|
refs/heads/master
| 2020-03-28T19:10:13.314536 | 2015-05-16T21:48:13 | 2015-05-16T21:48:13 | 35,629,692 | 6 | 1 | null | 2015-05-14T18:43:10 | 2015-05-14T19:59:29 | 2015-05-16T21:48:16 |
Haskell
|
[
{
"alpha_fraction": 0.7491527795791626,
"alphanum_fraction": 0.7553325891494751,
"avg_line_length": 65.88444519042969,
"blob_id": "61e75983eb59c6fbb0c15234cfa9b44cb03e1c07",
"content_id": "4ee7379b2dbe86307369ae13bbec09aa994eeeb3",
"detected_licenses": [
"MIT"
],
"is_generated": false,
"is_vendor": false,
"language": "Markdown",
"length_bytes": 15049,
"license_type": "permissive",
"max_line_length": 782,
"num_lines": 225,
"path": "/README.md",
"repo_name": "oulgen/CudaPy",
"src_encoding": "UTF-8",
"text": "# Summary\nCudaPy is a runtime library that lets Python programmers access NVIDIA's CUDA parallel computation API. It lets you write CUDA kernels in Python, and provides a nice API to invoke them. It works by translating CUDA kernels written in Python to C++, and JIT compiling them using `nvcc`. CudaPy offers many conveniences compared to C++ CUDA, and has many advantages over similar wrapper libraries for Python:\n\n* Native: You do not have to write or even see C++ code. CudaPy kernels are written purely in (a [subset](#kernel-functions) of) Python. Kernel invocation is done by calling Python functions.\n* Dynamic: No compilation is required. Just start your interpreter and CudaPy will JIT compile your kernels.\n* Efficient: CudaPy JIT compiles kernels to native code, so kernel calls are as efficient as C++ CUDA. Copying of data to and from the device memory is done only when necessary, and can be controlled manually.\n* Convenient: CudaPy handles many hurdles that other CUDA programmers have to deal with manually. These include automatic allocation of threads per block ([Kernel Invocation](#kernel-invocation)), management of device memory lifetimes ([Cuda Arrays](#cuda-arrays)), and transferring of data to and from the device.\n* Safe: CudaPy kernels are typed. Kernels are statically type-checked based on function signatures that you provide. All invocations of kernels dynamically verify input types.\n* Extensible: CudaPy features a (basic) template system that allows the definition of higher-order functions like `map`, `zipWith`, `scan` etc. (Note that this feature is still experimental).\n\n\n# Background\n\nPython is **slow**. Python programmers who need efficiency generally resort to using libraries like `numpy`, which are simply wrappers around compiled C code. Add thread level parallelism to that and maybe you start using the CPU efficiently, but most machines offer more compute capability in the form of a GPU. This is where CudaPy comes in. CudaPy introduces GPU parallelism to Python by providing an efficient, native, and easy to use interface to CUDA.\n\nOur cursory research on CUDA APIs for Python left much to be desired. Some libraries only allowed access to predefined functions sacrificing extensibility. Others were not \"Pythony\" enough: one library we found actually used quoted C code ([PyCuda](http://mathema.tician.de/software/pycuda/)). Other libraries required the use of a compiler, which went against the dynamic nature of Python ([NumbaPro](http://docs.continuum.io/numbapro/)).\n\nWe believe in simplicity and usability. For a concrete example, this is how we would like to implement SAXPY in Python:\n```\nimport cudapy as cp\n\n# saxpy : void (float alpha, float* X, float* Y)\ndef saxpy(alpha, X, Y):\n if idx < len(X):\n Y[idx] = alpha * X[idx] + Y[idx]\n\n# Compile the kernel function\nsaxpyCall = cp.compile(saxpy)\n\nalpha = ... # Pick a float\nX = ... # Create and populate Python list X\nY = ... # Create and populate Python list Y\n\n# Transfer Y to device memory\nY = cp.CudaArray(Y)\n# Make the SAXPY call\nsaxpyCall(len(X))(alpha, X, Y)\n# Convert the result back to Python list\nresult = Y.toList()\n```\nYou should note a few things here. First of all, this is pure Python. Second, the kernel call (`saxpyCall`) does not take in grid or block dimensions; it only takes in the number of threads we want. Finally, even though we had to copy `Y` to device memory manually to be able to refer to it later, `X` is copied automatically. CudaPy handles most memory operations automatically, and provides finer control when we need it.\n\nAt its heart, CudaPy does the following: it takes a kernel function (and possibly a list of helper functions) written in Python and its type signature, and returns a Python function that invokes this kernel. Apart from that, CudaPy provides nice conveniences like automatic thread allocation and a class to manage memory.\n\n# Approach\n\nHere is an overview of the CudaPy \"production line\":\n\n1. `cudapy.compile` is given a kernel function and possibly helper functions.\n2. For each function, retrieve its source code using Python's inspection facilities. Parse the function's type signature from comments above it.\n3. The source and type signatures are sent to a shared library (written in Haskell) using Python's foreign function interface.\n4. Parse the raw source into an AST using `language-python`, an external Haskell package.\n5. Translate Python AST to a C like AST with type information.\n6. Infer types for variable declarations and type-check functions.\n7. Put together the CUDA source. This involves rendering our AST as C source code, adding `__global__` and `__device__` declarations, forward declaring all functions (all functions in CudaPy are mutually recursive), and creating kernel invocation functions.\n8. Python receives the CUDA source (by the foreign function interface), and compiles it using `nvcc` into a shared library.\n9. Dynamically load the kernel invocation function using `ctypes`.\n10. Wrap a convenience function around this (to handle device memory and thread allocation) and return it to the user.\n\nCudaPy caches compiled functions, so step 8 (which is by far the slowest step) is skipped after the first time a code is ran.\n\nHere is an example translation. First, recall the SAXPY kernel from before:\n```\n# saxpy : void (float alpha, float* X, float* Y)\ndef saxpy(alpha, X, Y):\n if idx < len(X):\n Y[idx] = alpha * X[idx] + Y[idx]\n```\n\nGiven this, our library produces the following CUDA C++ code:\n```\n__global__ void saxpy (int, int*, int*, int*);\n\nextern \"C\" {\n void __callsaxpy (dim3, dim3, int, int*, int*, int*);\n}\n\n__device__ static\ninline size_t len(void* arr)\n{\n return *((size_t*)arr - 1);\n}\n\n__global__\nvoid saxpy (int alpha, int* X, int* Y, int* result)\n{\n int idx;\n idx = blockIdx.x * blockDim.x + threadIdx.x;\n if (idx < len(X))\n {\n result[idx] = alpha * X[idx] + Y[idx];\n }\n}\n\nvoid __callsaxpy (dim3 gridDim, dim3 blockDim, int arg0, int* arg1,\n int* arg2, int* arg3)\n{\n saxpy<<<gridDim, blockDim>>>(arg0, arg1, arg2, arg3);\n cudaThreadSynchronize();\n}\n```\n\nAs you can see, the kernel is pretty much exactly mirrored as the `saxpy` function. Our library infers that it needs to be global. We also create the `__callsaxpy` function, which invokes the kernel and immediately synchronizes. The first few lines forward declare all functions, export `__callxxxx` functions to C, and define our library functions (only `len` in this case).\n\n## Kernel Functions\n\nA kernel function is simply a Python function (with some restrictions), that looks like this:\n```\n# saxpy : void (float alpha, float* X, float* Y)\ndef saxpy(alpha, X, Y):\n if idx < len(X):\n Y[idx] = alpha * X[idx] + Y[idx]\n```\nThe only thing not Python about kernel functions is some predefined CUDA constants and a type signature.\n\nThe type signature is provided in a comment above each kernel function. It can also be overwritten by supplying a `sig` argument to `cudapy.compile`. Valid types are the following: `void`, `bool`, `int`, `float`, `double`, `t*` where `t` is a type. Kernels must have a `void` return type. Helper functions can have any type.\n\nAs for the kernel function itself, we support the following subset of Python/CUDA:\n\n* Predefined constants: `gridDim` (`gridDim.x`, ...), `blockDim`, `blockIdx`, `threadIdx`, `warpSize`, `idx`, `idy`, `idz`. The first five constants have the same meaning as in CUDA. `idx` etc. are just aliases for `blockIdx.x * blockDim.x + threadIdx.x`. All these have the type `int`.\n* Constants: Integer (e.g `42`), floating point (`4.2`), boolean (`True`, `False`)\n* Operators: `+`, `-`, `*`, `/`, `%`, `<<`, `>>`, `&`, `|`, `^`, unary `-` (negation), `~`, `==`, `!=`, `<`, `<=`, `>`, `>=`, `and`, `or`, `not`.\n* Conditional statements: `x if b else y`\n* Array operations: `len` that returns the length of an array. The length of an array cannot be changed.\n* Mathematical functions: Single and double precision library functions supported by CUDA. Examples: `math.sin`, `math.sinf`...\n* Control flow: while loops, if statements, `continue`, `break`, C like for loops (`for i in xrange(start, end, step)`) etc.\n* Casts: `int`, `float`, `double`\n\nSince Python does not have scoping over variables, each variable has to be used exactly at one type.\n\n## Compiling Kernels\nCUDA kernel functions can be compiled very easily by calling `cp.compile([function1, function2, ...])`. For example, the previous `saxpy` function can be compiled by\n```\nsaxpyCall = cp.compile(saxpy)\n```\n\nIf your kernel call uses other device functions as helper functions, they need to be compiled together. For example, if our SAXPY kernel function used a helper function called `add`, we would compile it in the following way:\n```\nsaxpyWithHelperCall = cp.compile([saxpy, add])\n```\n\n## Kernel Invocation\nKernel invocation is similarly easy. Remember how we invoked the `saxpy` kernel:\n```\nsaxpyCall(len(X))(alpha, X, Y)\n```\n\n`cudaarray.compile` takes a list of functions and returns a curried function. The first argument is the dimensions: how many threads you want for x, y, and z. This will usually be the dimensions of your data. For example, we pass the length of `X` to `saxpyCall`. If you provide less than 3 dimensions, the default for `y` and `z` is 1.\n\n\n## CUDA Arrays\nCudaPy provides a CudaArray class that handles the transfer of data between the host and the device. The CudaArray class provides a nice and clear interface to the device memory and hides ugly library calls like `cudaMalloc` and `cudaMemcpy`. It also handles garbage collection through object lifetimes, so the user does not have to worry about freeing device memory.\n\nThere are two ways to create an array that resides in device memory:\n\n* Allocate an empty array: `cudapy.CudaArray.allocate(length, type)`\n* Copy a Python list: `cudapy.CudaArray(list)`\n\nCudaArrays are transfered back to host memory by using the `.toList()` method.\n\nOnce created, CudaArrays can be passed to compiled kernels as arguments and they will persist through kernel invocations. In general, you do not need to create CudaArrays manually for inputs as kernel calls automatically transfer Python lists to device memory. You only need to use CudaArrays when you need to refer to them (for example, in function results), or to avoid copying the same array to the device multiple times.\n\n## Memory Management\nCudaPy provides finer control over data when you need it. CudaArrays do not copy device memory back unless you invoke the `.toList()` method. This means you can chain kernel calls on the same CudaArray without moving data back and forth between the host and the device memory. Take the following example:\n\n```\nX = # Create and populate X\nY = # Create and populate Y\n\n# Create the result array\ncudaResult = cp.CudaArray.allocate(len(X), cp.Int)\n\n# Make a multiply call\nmultiplyCall(len(X))(alpha, X, cudaResult)\n\n# Make an add call\naddCall(len(X))(Y, cudaResult)\n\n# Convert the result back to Python list\nresult = cudaResult.toList()\n```\n\nAssuming `multiplyCall` and `addCall` are defined, this is a perfectly fine way of implementing SAXPY in terms of correctness. Note that `cudaResult` lives in device memory until the last line, and it is copied back only once. `X` is copied from host to device during `multiplyCall` and `Y` is copied during `addCall`. Assuming they go out of scope, they are garbage collected a never copied back.\n\n## Limitations & Future Work\nCudaPy has a couple limitations:\n\n* Python limitations: Nested functions and objects are not supported. Nested functions would degrade performance, and require arbitrary jumps and garbage collection on the GPU. Objects (or a subset of objects) could be supported, but would make typing much more complicated. A nice alternative could be C-like structs, which should be east to implement.\n* Built-in functions: CudaPy is basically a translator, thus it needs access to the source code of a function. Python cannot retrieve the source code of built-in functions, so they cannot be used with CudaPy. This also means CudaPy cannot compile a function if it cannot access its source (there could be multiple reasons for this). This should not be a problem for most programs since kernel functions and calls to `cudapy.compile` generally reside in the same file.\n* Shared memory: CudaPy does not support shared memory. This wasn't a design decision or particularly hard, we simply decided to concentrate on other features. We have the basic idea and a future release could incorporate it. (Shared memory is a big part of CUDA, so this would be a priority).\n\n# Results\nIn this section, we want to give a general idea of just how fast CudaPy is. This means comparing execution times, and there are many ways to do that. We could compare CudaPy with vanilla Python, but this is not fair since CudaPy is compiled. Handwritten CUDA vs CudaPy is not instructive, as these will generate pretty much the same code. Since we are trying to make Python faster, it makes sense to compare CudaPy to current high-speed libraries. For this reason, we choose to compare CudaPy with NumPy.\n\nWe implemented several functions using CudaPy and NumPy in what we think is the most efficient way. We then timed the execution of these programs over large datasets for many iterations. Below, we give the total execution time of each function over the same dataset. The running time of matrix multiplication and Mandelbrot include the cost of copying data to and from the device memory. Since SAXPY is bandwidth bounded, this would make no sense so copying costs are excluded. This is justified since SAXPY could be part of a larger computation, and the intermediate date would be kept on device memory.\n\n\n\n\n\nThese results were attained on the following computer:\n```\nMacbook Pro Retina (Mid 2012)\nOS X Yosemite (10.10.3)\n2.3 GHz Intel Core i7\n8 GB 1600 MHz DDR3\n\nNVIDIA GeForce GT 650M\nCUDA cores: 384 cores\nGraphics Clock (MHz): Up to 900 MHz\nMemory Bandwidth (GB/sec): Up to 80.0\nBus: PCIe\nBus width: x8\nVRAM: 1024MB\n```\n\nHere is the Mandelbrot image:\n\n\n\n# Related Work\n\nWe got our inspiration for CudaPy form a system called VecPy. VecPy was last year's winner at [15-418 Parallelism competition](http://15418.courses.cs.cmu.edu/spring2014/competition). As in its creator's words, VecPy \"leverages multi-threading and SIMD instructions on modern x86 processors.\" CudaPy goes in a different direction and adds GPU level parallelism. We also have a less strict type system: VecPy compiles code for a single type like `int` or `float` where as CudaPy kernels can take an arbitrary signature of base types (these include `void`, `bool`, `int`, `float`, `double`, and possibly nested arrays on these types). Finally, CudaPy had some extra challenges VecPy did not have such as handling separate device and host memory, and interfacing with the CUDA runtime.\n\n# References\n"
},
{
"alpha_fraction": 0.5408163070678711,
"alphanum_fraction": 0.557823121547699,
"avg_line_length": 17.375,
"blob_id": "061c5a2c97a0f73b5e14fa93d9d7bf37b228e86b",
"content_id": "30e79a9007cfb99224c8259ef28c33a3ef2cd678",
"detected_licenses": [
"MIT"
],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 294,
"license_type": "permissive",
"max_line_length": 43,
"num_lines": 16,
"path": "/cudapy/setup.py",
"repo_name": "oulgen/CudaPy",
"src_encoding": "UTF-8",
"text": "\nfrom setuptools import setup, find_packages\n\nsetup(\n name = \"CudaPy\",\n version = \"0.1\",\n packages = find_packages(),\n package_data = {\n '': ['*.so'],\n },\n zip_safe = True,\n\n author = \"Josh Acay, Oguz Ulgen\",\n author_email = \"[email protected]\",\n license = \"MIT\",\n url = \"http://418.oulgen.com/\",\n)"
},
{
"alpha_fraction": 0.6329787373542786,
"alphanum_fraction": 0.6409574747085571,
"avg_line_length": 21.117647171020508,
"blob_id": "744507bf0731ff080d57b412b678caa1548ba783",
"content_id": "446544c9ec9cdc667815c9eb278b42a03e78c0c9",
"detected_licenses": [
"MIT"
],
"is_generated": false,
"is_vendor": false,
"language": "C",
"length_bytes": 376,
"license_type": "permissive",
"max_line_length": 62,
"num_lines": 17,
"path": "/py2cuda/cbits/module_init.c",
"repo_name": "oulgen/CudaPy",
"src_encoding": "UTF-8",
"text": "#include <HsFFI.h>\n\n#define LIB_NAME \"py2cuda.dylib\"\n\nstatic void library_init (void) __attribute__ ((constructor));\nstatic void library_init (void)\n{\n static char *argv[] = { LIB_NAME, 0}, **argv_ = argv;\n static int argc = 1;\n hs_init(&argc, &argv_);\n}\n\nstatic void library_exit (void) __attribute__ ((destructor));\nstatic void library_exit (void)\n{\n hs_exit();\n}\n"
},
{
"alpha_fraction": 0.677489161491394,
"alphanum_fraction": 0.6783549785614014,
"avg_line_length": 25.86046600341797,
"blob_id": "1e19d3b013bdc251c9183b55bb4cc2bd2194181a",
"content_id": "a6f52400218601a98cb4557468a446e5ef686a6c",
"detected_licenses": [
"MIT"
],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 2310,
"license_type": "permissive",
"max_line_length": 99,
"num_lines": 86,
"path": "/cudapy/cudaarray.py",
"repo_name": "oulgen/CudaPy",
"src_encoding": "UTF-8",
"text": "from ctypes import *\nfrom pkg_resources import resource_filename\n\nimport cudatypes\n\n\n# Load CUDA memory library\nlibCudaPy = cdll.LoadLibrary(resource_filename(__name__, 'libCudaPy.so'))\n\ncudaPyHostToDevice = getattr(libCudaPy, \"cudaPyHostToDevice\")\ncudaPyHostToDevice.argtypes = [c_void_p, c_void_p, c_size_t, c_size_t]\ncudaPyHostToDevice.restype = c_int;\n\ncudaPyDeviceToHost = getattr(libCudaPy, \"cudaPyDeviceToHost\")\ncudaPyDeviceToHost.argtypes = [c_void_p, c_void_p, c_size_t, c_size_t]\ncudaPyDeviceToHost.restype = c_int;\n\ncudaPyAllocArray = getattr(libCudaPy, \"cudaPyAllocArray\")\ncudaPyAllocArray.argtypes = [c_size_t, c_size_t]\ncudaPyAllocArray.restype = c_void_p;\n\ncudaPyFree = getattr(libCudaPy, \"cudaPyFree\")\ncudaPyFree.argtypes = [c_void_p]\ncudaPyFree.restype = c_int;\n\n\nclass CudaError (Exception):\n def __init__(self, value):\n self.value = value\n def __str__(self):\n return repr(self.value)\n\n\nclass BaseCudaArray(object):\n def __init__(self, size, type):\n self._length = size\n self._cudaType = type\n self._type = type._ctype\n self._pointer = cudaPyAllocArray(self._length, sizeof(self._type))\n if self._pointer is None:\n raise CudaError(\"bad CUDA malloc\")\n\n def toHost(self):\n out = (self._type * self._length)()\n code = cudaPyDeviceToHost(cast(out, c_void_p), self._pointer, self._length, sizeof(self._type))\n if code != 0:\n raise CudaError(\"failed to copy from device to host: \" + str(self._pointer))\n return out\n\n def toList(self):\n out = self.toHost()\n return [out[i] for i in xrange(self._length)]\n\n def __len__(self):\n return self._length\n\n def length(self):\n return self._length\n\n def elemType(self):\n return self._cudaType\n\n def pointer(self):\n return self._pointer\n\n def __getitem__(self, i):\n pass\n\n def __setitem__(self, i, v):\n pass\n\n def __del__(self):\n cudaPyFree(self._pointer)\n\n\nclass CudaArray(BaseCudaArray):\n def __init__(self, l):\n super(CudaArray, self).__init__(len(l), cudatypes.elemType(l))\n tmp = (self._type * len(l))(*l)\n code = cudaPyHostToDevice(self._pointer, tmp, len(l), sizeof(self._type))\n if code != 0:\n raise CudaError(\"failed to copy from host to device: \" + str(self._pointer))\n\n @staticmethod\n def allocate(size, type):\n return BaseCudaArray(size, type)\n"
},
{
"alpha_fraction": 0.5465995073318481,
"alphanum_fraction": 0.5954660177230835,
"avg_line_length": 25.46666717529297,
"blob_id": "3cdf5bd5408cc4f86ce64e57e4a84e0cc8d89b04",
"content_id": "2b49897e1aaad910649ce36b240461ef844dff7a",
"detected_licenses": [
"MIT"
],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 1985,
"license_type": "permissive",
"max_line_length": 95,
"num_lines": 75,
"path": "/tests/mandelbrot.py",
"repo_name": "oulgen/CudaPy",
"src_encoding": "UTF-8",
"text": "import png\nimport cudapy as cp\nfrom time import clock\n\n\n# mandel : int (float, float, int)\ndef mandel(c_re, c_im, count):\n z_re = c_re\n z_im = c_im\n\n for i in xrange(0, count):\n if z_re * z_re + z_im * z_im > 4.0:\n break\n new_re = z_re * z_re - z_im * z_im\n new_im = 2.0 * z_re * z_im\n z_re = c_re + new_re\n z_im = c_im + new_im\n\n return i\n\n\n# mandelbrot : void (float, float, float, float, int, int, int, int*)\ndef mandelbrot(x0, y0, x1, y1, width, height, maxIter, output):\n if (idx >= width * height):\n return\n\n dx = (x1 - x0) / float(width)\n dy = (y1 - y0) / float(height)\n\n x = x0 + float(idx % width) * dx\n y = y0 + float(idx / width) * dy\n output[idx] = mandel(x, y, maxIter)\n\n\n__mandelbrotCall = cp.compile([mandelbrot, mandel])\n\ndef mandelbrotCall(x0, y0, x1, y1, width, height, maxIter):\n cudaResult = cp.CudaArray.allocate(width * height, cp.Int)\n __mandelbrotCall(cp.dim3(width * height))(x0, y0, x1, y1, width, height, maxIter, cudaResult)\n return cudaResult.toHost()\n\n\ndef scaleAndShift(x0, y0, x1, y1, scale, shiftX, shiftY):\n x0 *= scale\n x1 *= scale\n y0 *= scale\n y1 *= scale\n x0 += shiftX\n x1 += shiftX\n y0 += shiftY\n y1 += shiftY\n return (x0, y0, x1, y1)\n\n\ndef savePng(f, raw, width, height):\n raw = map(lambda x : ((x / 256.0) ** 0.5) * 255.0 , raw)\n rows = [raw[i * width : i * width + width] for i in xrange(height)]\n with open(f, 'wb') as f: # binary mode is important\n w = png.Writer(width, height, greyscale=True)\n w.write(f, rows)\n\n\nif __name__ == \"__main__\":\n width, height, maxIters = 1200, 800, 256\n x0, y0, x1, y1 = -2.0, -1.0, 1.0, 1.0\n if len(sys.argv) >= 2 and sys.argv[1] == \"2\":\n x0, y0, x1, y1 = scaleAndShift(x0, y0, x1, y1, 0.015, -0.986, 0.3)\n\n start = clock()\n raw = mandelbrotCall(x0, y0, x1, y1, width, height, maxIters)\n total = clock() - start\n print \"Width:\", width, \"height:\", height, \"maxIters:\", maxIters\n print \"Time:\", total\n\n savePng(\"mandel.png\", raw, width, height)\n"
},
{
"alpha_fraction": 0.5821229219436646,
"alphanum_fraction": 0.6000000238418579,
"avg_line_length": 18.042552947998047,
"blob_id": "337279864656b9266cb4552853317c877cecedca",
"content_id": "e72e482d64cab57ece8bdf035feb07322da4c693",
"detected_licenses": [
"MIT"
],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 895,
"license_type": "permissive",
"max_line_length": 75,
"num_lines": 47,
"path": "/tests/matrixmultiplyFloat.py",
"repo_name": "oulgen/CudaPy",
"src_encoding": "UTF-8",
"text": "import cudapy as cp\n\nfrom time import time\n\n# rangeId : void (float* A)\ndef rangeId(A):\n if idx < len(A):\n A[idx] = float(idx)\n\n__rangeIdCall = cp.compile(rangeId)\n\n# matrixMultiply : void (float* A, float* B, float* C, int m, int n, int p)\n# A is m x n\n# B is n x p\n# C is m x p\ndef matrixMultiply(A, B, C, m, n, p):\n row = idy\n col = idx\n result = 0.0\n\n if row >= m or col >= p:\n return\n\n for i in xrange(n):\n result += A[row * n + i] * B[i * p + col]\n\n C[row * p + col] = result\n\n__matrixMultiplyCall = cp.compile(matrixMultiply)\n\nm = 1200\nn = 800\np = 1000\n\nA = cp.CudaArray.allocate(m * n, cp.Float)\nB = cp.CudaArray.allocate(n * p, cp.Float)\n__rangeIdCall(m * n)(A)\n__rangeIdCall(n * p)(B)\n\nstart = time()\nC = cp.CudaArray.allocate(m * p, cp.Float)\n__matrixMultiplyCall(p, m)(A, B, C, m, n, p)\nC = C.toHost()\ntotal = time() - start\nprint \"Total time: \", total\n\nprint C[200]\n"
},
{
"alpha_fraction": 0.6470588445663452,
"alphanum_fraction": 0.6651583909988403,
"avg_line_length": 20.095237731933594,
"blob_id": "d12e02158902c6aaef507f216932d214d70a220c",
"content_id": "a5b91386ef6ff41959c61832cfca559e0e637b7e",
"detected_licenses": [
"MIT"
],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 442,
"license_type": "permissive",
"max_line_length": 48,
"num_lines": 21,
"path": "/tests/saxpy.py",
"repo_name": "oulgen/CudaPy",
"src_encoding": "UTF-8",
"text": "import cudapy as cp\n\n# saxpy : void (float alpha, float* X, float* Y)\ndef saxpy(alpha, X, Y):\n if idx < len(X):\n Y[idx] = alpha * X[idx] + Y[idx]\n\n# Compile the kernel function\nsaxpyCall = cp.compile(saxpy)\n\nX = map(float, range(100))\nY = map(float, range(100))\n\n# Transfer Y to device memory\nY = cp.CudaArray(Y)\n# Make the SAXPY call\nsaxpyCall(len(X))(5.0, X, Y)\n# Convert the result back to Python list\nresult = Y.toList()\n\nprint result"
},
{
"alpha_fraction": 0.6260416507720947,
"alphanum_fraction": 0.6338541507720947,
"avg_line_length": 26.826086044311523,
"blob_id": "c1c6e857e0a0b293078572ac0634bf8d86afe649",
"content_id": "09ca4503a68188ed598b2fdd4ee82ea15351e008",
"detected_licenses": [
"MIT"
],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 1920,
"license_type": "permissive",
"max_line_length": 105,
"num_lines": 69,
"path": "/cudapy/wrapper.py",
"repo_name": "oulgen/CudaPy",
"src_encoding": "UTF-8",
"text": "from cudatypes import dim3, Pointer\nfrom cudaarray import BaseCudaArray, CudaArray\n\n\nclass CudaPyError (Exception):\n def __init__(self, value):\n self.value = value\n def __str__(self):\n return repr(self.value)\n\n\n# Handle grid and block dims, coerce CudaArrays into bare pointers\ndef wrapper(fun, sig, funName):\n threadSize = 512\n sig = sig[1:]\n\n def kernel(callDim, y = 1, z = 1):\n if not isinstance(callDim, dim3):\n callDim = dim3(callDim, y, z)\n blockDim = allocateThreads(threadSize, callDim)\n gridDim = getGridDim(callDim, blockDim)\n\n def coerceArgs(*args):\n args = list(args)\n if len(args) != len(sig):\n raise CudaPyError(funName + \" takes \" + str(len(sig)) + \" arguments.\")\n\n temps = [] # Prevent premature garbage collection\n for i in xrange(len(sig)):\n if isinstance(sig[i], Pointer):\n if isinstance(args[i], list):\n temps.append(CudaArray(args[i]))\n args[i] = temps[-1]\n assert isinstance(args[i], BaseCudaArray), \"expected CudaArray found \" + type(args[i]).__name__\n assert args[i].elemType() == sig[i].elemType(), \"argument types do not match\"\n args[i] = args[i].pointer()\n\n args = [gridDim, blockDim] + args\n fun(*args)\n\n return coerceArgs\n\n return kernel;\n\n\n# Allocate available threads to three dimensions\ndef allocateThreads(threads, dim):\n def power_two(n):\n return 1 << (n.bit_length() - 1)\n\n tx = min(threads, power_two(dim.x))\n threads //= tx\n ty = min(threads, power_two(dim.y))\n threads //= ty\n tz = min(threads, power_two(dim.z))\n threads //= tz\n\n return dim3(tx, ty, tz)\n\n\n# Compute grid dimensions from data and block dimensions\ndef getGridDim(callDim, blockDim):\n def divideUp(n, d):\n return (n + d - 1) // d\n\n x = divideUp(callDim.x, blockDim.x)\n y = divideUp(callDim.y, blockDim.y)\n z = divideUp(callDim.z, blockDim.z)\n return dim3(x, y, z)\n"
},
{
"alpha_fraction": 0.8128342032432556,
"alphanum_fraction": 0.8181818127632141,
"avg_line_length": 45.75,
"blob_id": "449c28df4a9f958ba5820a161cb0cbb6bfddc177",
"content_id": "699838b2b2145a104fef12727f7a42a16877b3d5",
"detected_licenses": [
"MIT"
],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 187,
"license_type": "permissive",
"max_line_length": 77,
"num_lines": 4,
"path": "/cudapy/__init__.py",
"repo_name": "oulgen/CudaPy",
"src_encoding": "UTF-8",
"text": "\nfrom cudatypes import Pointer, Void, Bool, Int, Float, Double, dim3, elemType\nfrom cudaarray import CudaArray\nfrom compiler import compile, kernel\nfrom template import Template, template"
},
{
"alpha_fraction": 0.6295389533042908,
"alphanum_fraction": 0.6319869160652161,
"avg_line_length": 23.75757598876953,
"blob_id": "e1c73fb5b04f39621d0ef59d58a36517b21b56b6",
"content_id": "186573c10e2bc41206aaf91c26f917f0aa1242cd",
"detected_licenses": [
"MIT"
],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 2451,
"license_type": "permissive",
"max_line_length": 75,
"num_lines": 99,
"path": "/cudapy/template.py",
"repo_name": "oulgen/CudaPy",
"src_encoding": "UTF-8",
"text": "import inspect, re\nfrom textwrap import dedent\nfrom types import FunctionType\n\nfrom cudatypes import parseType\n\n\nclass TemplateError(Exception):\n def __init__(self, value):\n self.value = value\n def __str__(self):\n return repr(self.value)\n\n\nclass Template:\n def __init__(self, fun):\n # Make sure we are given a function and try to retrieve its source code\n if not inspect.isfunction(fun):\n raise TemplateError(\"object not a function: \" + str(fun))\n try:\n source = stripDecorator(dedent(inspect.getsource(fun)))\n except:\n raise TemplateError(\"cannot retrieve the source code of \" + str(fun))\n\n # Get type signature\n try:\n candidates = inspect.getcomments(fun).splitlines()\n candidatesP = [parseSig(s, fun.__name__) for s in candidates]\n sig = next((s for s in candidatesP if s is not None), None)\n except:\n sig = None\n\n # Store template\n self.__name__ = fun.__name__\n self._function = fun\n self._source = source\n self._signature = sig\n\n def substitute(self, subst):\n if isinstance(subst, list):\n for sub in subst:\n self.substitute(sub)\n return\n self.__replace(subst[1], self.__repr(subst[0]))\n return self\n\n # String replacement for whole worlds\n def __replace(self, old, new):\n self._source = re.sub(r'\\b' + old + r'\\b', new, self._source)\n return self\n\n @staticmethod\n def __repr(obj):\n if isinstance(obj, FunctionType):\n return obj.__name__\n return str(obj)\n\n\ndef template(subs):\n def inner(f):\n return Template(f).substitute(subs)\n\n return inner\n\n\n# Returns None if not a valid signature\ndef parseSig(sig, funName):\n pat = r\"#? \\s* (\\w+) \\s*:\\s* (\\w+) \\s* \\( (.*) \\)\".replace(\" \", \"\")\n m = re.match(pat, sig)\n if not m or m.group(1) != funName:\n return None\n\n restype = m.group(2)\n args = [x.strip() for x in m.group(3).split(',')]\n argtypes = [arg.split(' ')[0] for arg in args]\n\n try:\n restype = parseType(restype)\n argtypes = [parseType(t) for t in argtypes]\n except:\n return None\n\n return [restype] + argtypes\n\n\n# Strip decorators from function source\ndef stripDecorator(source):\n # Optimize common case\n if source.startswith(\"def\"):\n return source\n\n lines = source.splitlines()\n keep = []\n for i in xrange(len(lines)):\n if lines[i].lstrip().startswith(\"def\"):\n break\n if lines[i].lstrip().startswith(\"#\"):\n keep.append(lines[i])\n return '\\n'.join(keep + lines[i:])\n"
},
{
"alpha_fraction": 0.7033613324165344,
"alphanum_fraction": 0.7058823704719543,
"avg_line_length": 29.512821197509766,
"blob_id": "a47e16851be6d076a982a227ae0233e11715796d",
"content_id": "de6db1dad03b9216efb80f4dbc8cdbed90906538",
"detected_licenses": [
"MIT"
],
"is_generated": false,
"is_vendor": false,
"language": "Makefile",
"length_bytes": 1190,
"license_type": "permissive",
"max_line_length": 102,
"num_lines": 39,
"path": "/Makefile",
"repo_name": "oulgen/CudaPy",
"src_encoding": "UTF-8",
"text": "GHC_VERSION = $(shell ghc --version | grep -o '[[:digit:]].*$$')\ntranslator = py2cuda\n\nall: translator pylib\n\ntranslator: cabal_hack\n\t# Build\n\tcd ${translator} && cabal configure --enable-shared && cabal build\n\t# Copy binary files\n\tmkdir -p bin\n\t# cp ${translator}/dist/build/libHS${translator}*.a bin/${translator}.a\n\tcp ${translator}/dist/build/libHS${translator}*.dylib bin/${translator}.so\n\ncabal_hack:\n\t# We have to manually link the runtime library in cabal for some reason.\n\t# We also need to specify the GHC version correctly. This replaces the\n\t# 'extra-libraries' field in the cabal file with the correct version of rts.\n\tsed -i.tmp 's/\\(extra-libraries:.*HSrts-ghc\\).*/\\1${GHC_VERSION}/g' ${translator}/${translator}.cabal\n\npylib: translator\n\tnvcc -O2 --shared --compiler-options '-fPIC' -o bin/libCudaPy.so cudapy/libCudaPy.cu\n\n\t# Generate an egg file\n\tmkdir -p dist\n\tmkdir -p dist/cudapy\n\n\tcp cudapy/*.py dist/cudapy/\n\tmv dist/cudapy/setup.py dist/\n\tcp bin/${translator}.so dist/cudapy/\n\tcp bin/libCudaPy.so dist/cudapy/\n\n\tcd dist && python setup.py bdist_egg\n\tcp dist/dist/*.egg bin/cudapy.egg\n\n\trm -rf dist\n\nclean:\n\tcd ${translator} && cabal clean && rm -rf dist\n\trm -rf bin\n"
},
{
"alpha_fraction": 0.640116274356842,
"alphanum_fraction": 0.6488372087478638,
"avg_line_length": 26.30158805847168,
"blob_id": "9691dd36eceeba6248b8a01c4770bdc6413e91f5",
"content_id": "4f9faa40f92f7c20511bebc8f98d04f0581d9273",
"detected_licenses": [
"MIT"
],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 3440,
"license_type": "permissive",
"max_line_length": 91,
"num_lines": 126,
"path": "/cudapy/compiler.py",
"repo_name": "oulgen/CudaPy",
"src_encoding": "UTF-8",
"text": "# CudaPy module\nfrom ctypes import *\nimport hashlib, subprocess, tempfile, os.path\nfrom pkg_resources import resource_filename\n\nfrom cudatypes import *\nfrom template import Template, parseSig\nfrom wrapper import wrapper\n\n\n# Load the py2cuda library\npy2cudaLib = cdll.LoadLibrary(resource_filename(__name__, 'py2cuda.so'))\npy2cudaExtern = getattr(py2cudaLib, \"py2cuda\")\npy2cudaExtern.argtypes = [c_char_p, c_char_p]\npy2cudaExtern.restype = c_char_p\n\n\nclass CudaPyError (Exception):\n def __init__(self, value):\n self.value = value\n def __str__(self):\n return repr(self.value)\n\n\ndef kernel(sig = None, debug = False):\n def inner(f):\n return compile(f, sig, debug)\n\n return inner\n\n\ndef compile(funs, sigs = None, debug = False):\n if not isinstance(funs, list):\n return compile([funs], [sigs], debug)\n\n if len(funs) == 0:\n return None\n\n if sigs is None:\n sigs = [None] * len(funs)\n\n (pySources, sigs) = zip(*map(getSource, funs, sigs))\n pySource = \"\\n\\n\".join(pySources)\n\n (funExt, sigExt) = zip(*[(fun, sig) for (fun, sig) in zip(funs, sigs) if sig[0] is Void])\n funNames = [fun.__name__ for fun in funExt]\n debugOut = funNames[0] if len(funNames) > 0 else 'module'\n cudaSource = py2cuda(pySource, sigs, output = debugOut + \".cu\" if debug else None)\n cudaCalls = compileCuda(cudaSource, sigExt, [\"__call\" + f for f in funNames])\n\n return wrapper(cudaCalls[0], sigExt[0], funNames[0])\n\n\n# Returns the source code and type signature of the given function object\ndef getSource(fun, sig = None):\n if not isinstance(fun, Template):\n fun = Template(fun)\n\n if isinstance(sig, list):\n pass\n elif isinstance(sig, basestring):\n sig = parseSig(fun.__name__ + \" : \" + sig, fun.__name__)\n else:\n sig = fun._signature\n\n if sig is None:\n raise CudaPyError(\"function does not have a valid signature: \" + fun.__name__)\n\n return (fun._source, sig)\n\n\ndef py2cuda(source, sigs, output = None):\n hstypes = [[t._hstype for t in sig] for sig in sigs]\n sigEnc = '\\n'.join([' '.join(sig) for sig in hstypes])\n cudaSource = py2cudaExtern(source, sigEnc)\n\n # Check for errors during translation\n [code, cudaSource] = cudaSource.split(':', 1)\n if code == \"error\":\n raise CudaPyError(cudaSource)\n\n if output is not None:\n with open(output, \"w\") as f:\n f.write(cudaSource)\n\n return cudaSource\n\n\ndef compileCuda(source, sigs, funNames):\n libFile = hash(source) + \".so\"\n\n if not os.path.isfile(libFile):\n flags = [\"-O3\"]\n shared = [\"--shared\", \"--compiler-options\", \"-fPIC\", \"-x\", \"cu\"]\n warnings = [ \"-Xcudafe\"\n , \"--diag_suppress=declared_but_not_referenced\"\n , \"-Xcudafe\"\n , \"--diag_suppress=set_but_not_used\"\n ]\n\n tmpFile = tempfile.NamedTemporaryFile(suffix = '.cu')\n\n tmpFile.write(source)\n tmpFile.seek(0)\n\n try:\n files = [\"-o\", libFile, tmpFile.name]\n subprocess.check_output([\"nvcc\"] + flags + shared + warnings + files)\n except subprocess.CalledProcessError as e:\n print e.output\n raise CudaPyError(\"nvcc exited with error code \" + str(e.returncode))\n finally:\n tmpFile.close()\n\n funs = []\n for (sig, funName) in zip(sigs, funNames):\n fun = getattr(cdll.LoadLibrary(libFile), funName)\n fun.restype = sig[0]._ctype\n fun.argtypes = [dim3, dim3] + [t._ctype for t in sig[1:]]\n funs.append(fun)\n\n return funs\n\n\ndef hash(str):\n return hashlib.sha224(str).hexdigest()[:32]\n"
},
{
"alpha_fraction": 0.5828220844268799,
"alphanum_fraction": 0.5896387100219727,
"avg_line_length": 15.862069129943848,
"blob_id": "c5c7f27a10041aa666e54cc17e525fc4fce97333",
"content_id": "460e8bbc83c4483ae7f80873418da7a547bdbc97",
"detected_licenses": [
"MIT"
],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 1467,
"license_type": "permissive",
"max_line_length": 79,
"num_lines": 87,
"path": "/cudapy/cudatypes.py",
"repo_name": "oulgen/CudaPy",
"src_encoding": "UTF-8",
"text": "import ctypes\n\n\nclass CudaTypeError (Exception):\n def __init__(self, value):\n self.value = value\n def __str__(self):\n return repr(self.value)\n\n\nclass Type:\n def hstype(self):\n return self._hstype\n\n def ctype(self):\n return self._ctype\n\n\nclass Pointer(Type):\n def __init__ (self, type):\n self._hstype = \"*\" + type._hstype\n self._ctype = ctypes.c_void_p\n self._elemType = type\n\n def elemType(self):\n return self._elemType\n\n\nclass Void(Type):\n _hstype = \"void\"\n _ctype = ctypes.c_int\n\n\nclass Bool(Type):\n _hstype = \"bool\"\n _ctype = ctypes.c_bool\n\n\nclass Int(Type):\n _hstype = \"int\"\n _ctype = ctypes.c_int\n\n\nclass Float(Type):\n _hstype = \"float\"\n _ctype = ctypes.c_float\n\n\nclass Double(Type):\n _hstype = \"double\"\n _ctype = ctypes.c_double\n\n\ndef parseType(s):\n if s == \"void\":\n return Void\n if s == \"bool\":\n return Bool\n if s == \"int\":\n return Int\n if s == \"float\":\n return Float\n if s == \"double\":\n return Double\n\n if len(s) > 0 and s[-1] == '*':\n return Pointer(parseType(s[:-1]))\n\n raise CudaTypeError(\"invalid type: \" + s)\n\n\ndef elemType(l):\n if hasattr(l, 'elemType'):\n return l.elemType()\n if len(l) < 1:\n return Int\n if type(l[0]) is int:\n return Int\n elif type(l[0]) is float:\n return Float\n\n\nclass dim3(ctypes.Structure):\n _fields_ = [(\"x\", ctypes.c_uint), (\"y\", ctypes.c_uint), (\"z\", ctypes.c_uint)]\n\n def __init__(self, x, y = 1, z = 1):\n super(dim3, self).__init__(x, y, z)\n"
}
] | 13 |
blake27182/CafeWifiCapture
|
https://github.com/blake27182/CafeWifiCapture
|
6340344c91f88001dbfe6031c3864cff60dc9acd
|
cf08b36183e7eb3c48e5152a93d9c32b7c3b2bb1
|
1d17ea9ae6e4123f9b76f6c3468a8c48cf4b8780
|
refs/heads/master
| 2020-09-08T00:52:23.896427 | 2019-12-05T17:30:32 | 2019-12-05T17:30:32 | 220,962,441 | 1 | 0 | null | null | null | null | null |
[
{
"alpha_fraction": 0.7753203511238098,
"alphanum_fraction": 0.778007447719574,
"avg_line_length": 108.95454406738281,
"blob_id": "31ab9a41436ab00a411aac02993c45641c114b0c",
"content_id": "f8bc80a906a6aaa9c620831c879343ae0a9c90d7",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Markdown",
"length_bytes": 4838,
"license_type": "no_license",
"max_line_length": 853,
"num_lines": 44,
"path": "/README.md",
"repo_name": "blake27182/CafeWifiCapture",
"src_encoding": "UTF-8",
"text": "# CafeWifiCapture\n\nImagine you are in a crowded coffee shop and you're ready to pay for your order. After paying you ask for the wifi, and the cashier gestures to a small blackboard easel on the counter. You open this app, take a picture, and you are instantly logged in to the correct network, and the credentials are saved in your phones network settings.\n\nMy current state is this:\n\nI made a Python script that authenticates with the google cloud vision api, and can make requests with it. I have fed it an example image provided by google to test it out, and it works great on that. (This API stuff took me forever to figure out but in the process, I learned about the http protocol, the http header, body, and that its just a json object. I learned how OAuth2 works and why it bounces back and forth so many times (so 3rd parties can log you in), and I learned how to use curl to make http requests manually! I used this tool to save my API response (which contained the token) in a json file. After looking at that file, it was easy to see how I could parse out all the info Google detected in my image. However, Google did email me the next day to tell me they found the token in my public repo and said that it was a very bad idea.\n\n## Original Plan:\n\nMake a little script to draw bounding boxes on the letters, words, and blocks etc and show the confidence and label. It seems irrelavent, but this will get me familiar with how Google vision sees, and it will give me a chance to learn different ways of manipulating image data.\n\nLearn how computers find and authenticate with routers and wifi and all that jazz.\n\nMake a process for finding the network name and password in the jumble of words on the back of a router or near a menu etc.\n\nMake a process for using the words detected in the picture for logging into a network\n\n## Current Progress:\n\n#### Did \n* I've finished the bounding box scripts which will make it easy for me to see what the google api is thinking. I have optional boxes for the paragraphs, words, and letters, all of different colors. \n\n#### Learned\n* It turns out google's vision model does not match the bounding box corners up in a rectangle. So I ended up just parsing out opposite corners and drawing my box based on that. \n* To draw these boxes, I wanted to break it down to the byte code of the image, but after looking at a few images in base64 and in hex, I decided the jpg and png styles were different enought that I would not have time this round to account for all file types. \n* I ended up using Python Image Library to read the pixel values in, change the ones I needed, then write that data to a new file. Reading and writing is super slow though, so instead of reading the pixel values in, I kept it as an PIL.Image object. In reading about this class, I found a method to change pixel values way more easily. I could also render the image without saving it to disk this way.\n\n#### Did \n* I'm now working on a way to organize the words to easily pair the words together, so we know which is the network name and which is the password. It may be easier to pick a few good options and try every combination.\n\n#### Learned\n* Google vision is not perfect, so we need a way to check a few close options in the case that a perfect match is not found\n* Similarity is kind of subjective. I went with a definition that that counts the number of letters that match in-order and represents that number as a fraction of the total number of *implied* letters. This just means that if the strings are different in length, we'll represent the empty spaces of the smaller one as letters that do not match. \n* I did this because sometimes Google vision misrepresents a letter like '0' instead of 'o'. If we were just counting matched letters out of total letters, the pair (hello, hell) would have a higher score than (hello, hell0), but the second pair is clearly more similar. In addition, sometimes you've got names like NETGEAR411 and NETGEAR411-5G. Those have different meaning, so empty spaces count against correctness. \n\n#### Doing\n* Working on a process that finds the password. Since authenticating and signing into a network is so slow, I can't just check every word with every network name. That would take hours. So I need a way to find the password with great certainty.\n\n#### Learning\n* considering options like: Letting a machine learning model look at the placements and values of the words in the image to determine the password. Letting an ML model look at all the words and learning what a password usually looks like. Making a search algorithm to locate keywords like \"password\" or \"pw\" and look for words in a certain position relative to those words (like to the right of, or below)\n\n\n\n"
},
{
"alpha_fraction": 0.4441157877445221,
"alphanum_fraction": 0.45827171206474304,
"avg_line_length": 33.050357818603516,
"blob_id": "3695f164b2a4108221e09d4d80794d93b124d7b4",
"content_id": "bcbcbf1e07309bb260e0399843d1d8f5b092be25",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 4733,
"license_type": "no_license",
"max_line_length": 117,
"num_lines": 139,
"path": "/scripts/bounding_center.py",
"repo_name": "blake27182/CafeWifiCapture",
"src_encoding": "UTF-8",
"text": "from PIL import Image\n\n\ndef main_test():\n pass\n\n\ndef place_box(a_corners, image_obj, color=(0, 254, 0)):\n \"\"\"Draws a box with the given corners on the image object.\n\n The width of the lines is determined by the dimensions of the image.\n\n Args:\n a_corners: (`list` of `dict`): Top-left and bottom-right corners\n image_obj: (obj:`PIL.Image`): Image object to draw on\n color: (`tuple` of int): Color in RGB you want the box to be\n\n Returns:\n (obj:`PIL.Image`): The image after being drawn on\n\n \"\"\"\n # make sure corners comply with list of dict format\n # top-left and bottom-right corners only\n # e.g. [{\"x\": 50, \"y\": 100},\n # {\"x\": 80, \"y\": 150}]\n\n width = int(image_obj.size[1] / 300)\n\n # Draw top line\n for i in range(a_corners[0]['y'], a_corners[0]['y'] + width + 1):\n for j in range(a_corners[0]['x'], a_corners[1]['x'] + 1):\n image_obj.putpixel((j, i), color)\n\n # Draw bottom line\n for i in range(a_corners[1]['y'] - width, a_corners[1]['y'] + 1):\n for j in range(a_corners[0]['x'], a_corners[1]['x'] + 1): # google draws their corners in a circular pattern\n image_obj.putpixel((j, i), color)\n\n # Draw left line\n for i in range(a_corners[0]['y'], a_corners[1]['y'] + 1):\n for j in range(a_corners[0]['x'], a_corners[0]['x'] + width + 1):\n image_obj.putpixel((j, i), color)\n\n # Draw right line\n for i in range(a_corners[0]['y'], a_corners[1]['y'] + 1):\n for j in range(a_corners[1]['x'] - width, a_corners[1]['x'] + 1):\n image_obj.putpixel((j, i), color)\n\n return image_obj\n\n\ndef get_corners(\n response,\n box_words=True,\n box_letters=False,\n box_paragraphs=False\n):\n \"\"\"Parses out the corner information required by the place_box function\n\n Args:\n response: (Google API response object):\n box_words: (bool): Include words in parsing\n box_letters: (bool): Include letters in parsing\n box_paragraphs: (bool): Include paragraphs in parsing\n\n Returns:\n (`list` of `list` of `dict`): List of pairs of corners.\n\n \"\"\"\n # should probably refactor this to use our Vertex class\n\n boxes = [] # list of list of dict\n\n for page in response.full_text_annotation.pages:\n for block in page.blocks:\n for paragraph in block.paragraphs:\n for word in paragraph.words:\n for symbol in word.symbols:\n if box_letters:\n box = [\n {\n 'x': symbol.bounding_box.vertices[0].x,\n 'y': symbol.bounding_box.vertices[0].y\n },\n {\n 'x': symbol.bounding_box.vertices[2].x,\n 'y': symbol.bounding_box.vertices[2].y\n },\n ]\n boxes.append(box)\n # draw larger boxes after small ones\n if box_words:\n box = [\n {\n 'x': word.bounding_box.vertices[0].x,\n 'y': word.bounding_box.vertices[0].y,\n 'color': (255, 0, 0)\n },\n {\n 'x': word.bounding_box.vertices[2].x,\n 'y': word.bounding_box.vertices[2].y,\n },\n ]\n boxes.append(box)\n # draw larger boxes after small ones\n if box_paragraphs:\n box = [\n {\n 'x': paragraph.bounding_box.vertices[0].x,\n 'y': paragraph.bounding_box.vertices[0].y,\n 'color': (0, 0, 255)\n },\n {\n 'x': paragraph.bounding_box.vertices[2].x,\n 'y': paragraph.bounding_box.vertices[2].y,\n },\n ]\n boxes.append(box)\n\n return boxes\n\n\ndef box_document(a_path, response, **kwargs):\n\n\n boxes = get_corners(response, **kwargs)\n box_image = Image.open(a_path)\n\n for box in boxes:\n if 'color' in box[0].keys():\n box_image = place_box(box, box_image, box[0]['color'])\n else:\n box_image = place_box(box, box_image)\n\n return box_image\n\n\nif __name__ == '__main__':\n pass\n"
},
{
"alpha_fraction": 0.5727260112762451,
"alphanum_fraction": 0.57716304063797,
"avg_line_length": 32.15632247924805,
"blob_id": "8cd97497523c14be8a7c8936a239d5f6778b6f29",
"content_id": "8c70170680db53040876a57b16548818e296e793",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 14428,
"license_type": "no_license",
"max_line_length": 87,
"num_lines": 435,
"path": "/scripts/word_center.py",
"repo_name": "blake27182/CafeWifiCapture",
"src_encoding": "UTF-8",
"text": "import math\nfrom scripts.classes import Vertex, WordPoly, Match, Constraint\n\n\ndef get_word_polys(response):\n \"\"\"Parses out all the words in the response object to a list of WordPoly\n\n Args:\n response: (Google API response object): The data to process\n\n Returns:\n (`list` of obj:`WordPoly`): The list of words in the document.\n \"\"\"\n words = []\n for page in response.full_text_annotation.pages:\n for b, block in enumerate(page.blocks):\n for p, paragraph in enumerate(block.paragraphs):\n for word in paragraph.words:\n curr_word = ''\n for symbol in word.symbols:\n curr_word += symbol.text\n vertices = word.bounding_box.vertices\n words.append(\n WordPoly(\n vertices=vertices,\n confidence=word.confidence,\n word=curr_word,\n para_idx=p,\n block_idx=b\n )\n )\n return words\n\n\ndef get_poly(a_word, response, str_type='word'):\n \"\"\"Gets the WordPoly version of a given string.\n\n Args:\n a_word: (str): Must be present in response object\n response: (Google API response): response object\n str_type: (str): paragraph or word or symbol\n\n Returns:\n (obj:`WordPoly`): WordPoly for the given string\n \"\"\"\n if str_type == 'word':\n for word in response.text_annotations:\n if word.description == a_word:\n return WordPoly(\n word=a_word,\n confidence=word.confidence,\n vertices=word.bounding_box.vertices\n )\n # elif str_type == 'paragraph':\n # for page in response.full_text_annotation.pages:\n # for block in page.blocks:\n # for paragraph in block.paragraphs:\n\n\ndef get_matches(a_from_list, a_to_list, num_res=3, case_sense=True):\n \"\"\"Finds the top matches in any two list of strings.\n\n Args:\n a_from_list: (list of obj:`WordPoly` or str): detected words\n a_to_list: (list of obj:`WordPoly` or str): Words to compare to\n num_res: (int): maximum number of results to return. If an exact match\n is found, only that match is returned.\n case_sense: (bool): Weather or not the match should be case sensitive\n\n Returns:\n (list of tuple of WordPoly, str, str, float): Matches by words\n compared & similarity\n \"\"\"\n # if its a list of str, convert to WordPoly\n # also take care of case sensitivity\n if type(a_from_list[0]) is str:\n from_list = [WordPoly(word=x) for x in a_from_list]\n elif type(a_from_list[0]) is WordPoly:\n from_list = a_from_list\n else:\n raise Exception(\"you have to give me either a list of WordPoly or str\")\n if type(a_to_list[0]) is str:\n to_list = [WordPoly(word=x) for x in a_to_list]\n elif type(a_to_list[0]) is WordPoly:\n to_list = a_to_list\n else:\n raise Exception(\"you have to give me either a list of WordPoly or str\")\n\n # high pass (return exact match)\n for f_word in from_list:\n for t_word in to_list:\n if case_sense and f_word.word == t_word.word:\n return [Match(\n from_poly=f_word,\n to_poly=t_word,\n similarity=1\n )]\n elif f_word.word.upper() == t_word.word.upper():\n return [Match(\n from_poly=f_word,\n to_poly=t_word,\n similarity=1\n )]\n\n # medium pass (top results out of top 3's of each detected word)\n top_results = []\n for f_word in from_list:\n temp_list = [\n similarity(\n f_word,\n t_word,\n case_sense=case_sense\n )\n for t_word in to_list\n ]\n # Pick the top 3 for this word and put them in top_results\n for _ in range(3):\n best = temp_list[0]\n for temp in temp_list:\n if temp.similarity > best.similarity:\n best = temp\n top_results.append(best)\n temp_list.remove(best)\n\n final = []\n for _ in range(num_res):\n best = top_results[0]\n for res in top_results:\n if res.similarity > best.similarity:\n best = res\n final.append(best)\n top_results.remove(best)\n if num_res == 1:\n return final[:0]\n return final\n\n\ndef similarity(a_x, a_y, case_sense=True):\n \"\"\"Assesses how similar two strings are.\n\n Hard bias for characters in the same order. Will return extremely low\n similarity for palindromes. It is also case-sensitive.\n\n Args:\n a_x: (str): First string\n a_y: (str): Second string\n case_sense: (bool): Case sensitivity on or off\n\n Returns:\n (obj:`Match`): The Match object--two strings compared, and similarity.\n Closer to 1 is more similar, closer to 0 is less similar.\n \"\"\"\n # known issues with this algorithm:\n # hello / hellx receives the same score as hello / hell0\n # hello / hell receives the same score as hello / hell0\n\n r_match = Match()\n if type(a_x) is WordPoly:\n r_match.from_poly=a_x\n x = a_x.word\n else:\n x = a_x\n if type(a_y) is WordPoly:\n r_match.to_poly = a_y\n y = a_y.word\n else:\n y = a_y\n\n if not case_sense:\n x = x.upper()\n y = y.upper()\n\n r_match.word_to = y\n r_match.word_from = x\n\n if len(x) > len(y):\n sim = similarity_helper(0, 0, y, x) / len(x)\n else:\n sim = similarity_helper(0, 0, x, y) / len(y)\n\n r_match.similarity = sim\n return r_match\n\n\ndef similarity_helper(a_low, b_low, a, b):\n \"\"\"Recursive helper function for similarity.\n\n Args:\n a_low: (int): Low index on string a\n b_low: (int): Low index on string b\n a: (str): First string\n b: (str): Second string\n\n Returns:\n (float): highest similarity found at this depth\n\n \"\"\"\n best = 0\n for i in range(a_low, len(a)): # try every letter as the first letter\n for j in range(b_low, len(b)): # for every letter in a, check it against b\n if a[i] == b[j]: # if it matches, recurse on the rest\n temp = similarity_helper(i+1, j+1, a, b) + 1\n if temp > best:\n best = temp\n return best\n\n\ndef get_words_from_pool(key_word, a_word_pool=None, response=None, right=False,\n left=False, above=False, below=False):\n \"\"\"Gets words based on a given location scope relative to key_word.\n\n Uses subtractive boolean unions to scope to an area of the image.\n i.e. If you want to find words in the upper right quadrant,\n (remember key_word is the origin) set above and right to True.\n It is important to know that each exclusion doesn't originate at\n key_word.center. It actually will start either left of or above etc.\n so that the key_word itself would be included in the results. This is\n done to allow contradictions (like above=True, below=True) to return\n the words between those termination lines.\n\n Args:\n key_word: (obj:`WordPoly`): Origin for scoping around.\n a_word_pool: (`list` of obj:`WordPoly`): Words to choose from\n response: (Google API response object): Words to choose from\n right: (bool): exclude left\n left: (bool): exclude right\n above: (bool): exclude below\n below: (bool): exclude above\n\n Returns:\n (`list` of obj:`WordPoly`): The words you asked for\n \"\"\"\n # assuming the document was aligned straight and oriented correctly\n if a_word_pool is None:\n if response is None:\n raise Exception(\"you must give me either word_pool \"\n \"or a response object\")\n word_pool = get_word_polys(response)\n else:\n word_pool = a_word_pool\n\n pool_constraint = Constraint()\n\n if right:\n def constrain_right(a_word):\n left_max = key_word.center.x - (key_word.get_width()/2)\n return a_word.center.x > left_max\n pool_constraint.add_constraint(constrain_right)\n\n if left:\n def constrain_left(a_word):\n right_max = key_word.center.x + (key_word.get_width()/2)\n return a_word.center.x < right_max\n pool_constraint.add_constraint(constrain_left)\n\n if above:\n def constrain_above(a_word):\n lowest = key_word.center.y + (key_word.get_height()/2)\n return a_word.center.y < lowest # coords are in 4th quadrant\n pool_constraint.add_constraint(constrain_above)\n\n if below:\n def constrain_below(a_word):\n highest = key_word.center.y - (key_word.get_height()/2)\n return a_word.center.y > highest # coords are in 4th quadrant\n pool_constraint.add_constraint(constrain_below)\n\n words_from_pool = []\n for word in word_pool:\n if word == key_word:\n continue\n if pool_constraint.satisfies(word):\n words_from_pool.append(word)\n\n return words_from_pool\n\n\ndef proximity_sort(anchor, words, bias):\n \"\"\"Quick-sort based algorithm sorts in-place closest to furthest\n\n Args:\n anchor: (obj:`WordPoly`): The word every distance will be relative to\n words: (`list` of obj:`WordPoly`): The words to sort\n bias: (float): Alignment bias when calculating proximity.\n 1 is square, (0,1) prioritizes vertical words, (1,∞) for horizontal\n \"\"\"\n # decided to use a quick-sort because its great for in-place sorting\n # and has O(logn) time complexity. Not too worried about the space\n # complexity by using recursion because our list will usually range\n # from 3 to 10, giving us a typical depth of 3 or so.\n # If this algorithm becomes too expensive, it will probably be because\n # of the cost of calculating distance for each comparison.\n\n # would like to write a couple test cases for this. I know it generally\n # works, but its hard to tell if there are small discrepancies\n if len(words) > 1:\n prox_sort_helper(0, len(words)-1, words, anchor, bias)\n\n\ndef prox_sort_helper(low, high, words, anchor, bias):\n \"\"\"Recursive helper function for proximity_sort\n\n Args:\n low: (int): Low index (inclusive)\n high: (int): High index (inclusive)\n words: (`list` of obj:`WordPoly`): Words we are sorting\n anchor: (obj:`WordPoly`): Word to calculate distance relative to\n bias: (float): Alignment bias for calculating distance (see previous)\n \"\"\"\n partition = words[low]\n i = low + 1\n j = high\n while i < j:\n while (\n prox_calc(anchor, words[i], bias)\n <= prox_calc(anchor, partition, bias)\n and i < j\n ):\n i += 1\n while (\n prox_calc(anchor, words[j], bias)\n > prox_calc(anchor, partition, bias)\n and i < j\n ):\n j -= 1\n\n temp = words[j]\n words[j] = words[i]\n words[i] = temp\n\n if (prox_calc(anchor, words[i], bias)\n < prox_calc(anchor, partition, bias)):\n temp = words[i]\n words[i] = partition\n words[low] = temp\n\n if i - low > 1:\n prox_sort_helper(low, i-1, words, anchor, bias)\n if high - i > 0:\n prox_sort_helper(i, high, words, anchor, bias)\n\n\ndef prox_calc(f_word, t_word, bias=1):\n \"\"\"Calculates the distance between two given words.\n\n Uses the manhattan distance instead of pythagorean to discourage diagonal\n words. We also use a bias for choosing which axis to \"squish\". If you want\n to look for words on the same line, choose a high bias (1,∞). If you want\n to find words in a column, use a low bias (0,1).\n\n Args:\n f_word: (obj:`WordPoly`): First word\n t_word: (obj:`WordPoly`): Second word\n bias: (float): Alignment bias (see description)\n\n Returns:\n (float): Distance between the given word center vertices\n\n \"\"\"\n x_delta = abs(f_word.center.x - t_word.center.x)\n y_delta = abs(f_word.center.y - t_word.center.y)\n y_delta *= bias\n return math.sqrt(x_delta**2 + y_delta**2)\n\n\ndef get_passwords(words):\n # find the 'password' keyword location\n # make a group containing all the words on that line\n # if theres a colon, use the words after it\n # if none of the words work, and there are multiple words\n # that are close together, try concatenating them\n # and using the result\n\n # find a suitable password key\n suitable_keys = ['PASSWORD', 'PW', 'PIN', 'PWRD']\n pass_key_match = get_matches(\n words,\n suitable_keys,\n case_sense=False,\n num_res=1\n )\n print('suitable_key_match:', pass_key_match)\n\n # get the words in the scope of the suitable key\n # this scope might change or we might use multiple scopes\n final_passwords = []\n\n # horizontal scope and sort\n words_in_scope = get_words_from_pool(\n pass_key_match.from_poly,\n words,\n right=True,\n below=True,\n above=True,\n )\n proximity_sort(pass_key_match.from_poly, words_in_scope, 10)\n for word in words_in_scope:\n if len(word) > 5:\n final_passwords.append(word)\n break\n\n # vertical scope and sort\n words_in_scope = get_words_from_pool(\n pass_key_match.from_poly,\n words,\n right=True,\n below=True,\n left=True,\n )\n proximity_sort(pass_key_match.from_poly, words_in_scope, .5)\n # print('vertical sort:')\n # for word in words_in_scope:\n # print(word)\n for word in words_in_scope:\n if len(word) > 5:\n final_passwords.append(word)\n break\n\n return final_passwords\n\n\ndef get_next_word_on_line(key_word, word_pool):\n words_from_pool = get_words_from_pool(\n key_word,\n a_word_pool=word_pool,\n right=True,\n above=True,\n below=True\n )\n proximity_sort(key_word, words_from_pool, bias=10)\n return words_from_pool[0] if words_from_pool else None\n\n\nif __name__ == '__main__':\n pass\n\n"
},
{
"alpha_fraction": 0.5461441278457642,
"alphanum_fraction": 0.5478297472000122,
"avg_line_length": 31.50684928894043,
"blob_id": "c9138486ce276a435e99940d54c35d845ec3ba12",
"content_id": "344676c97b769ca426d298f42fb0850657fdc918",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 2373,
"license_type": "no_license",
"max_line_length": 91,
"num_lines": 73,
"path": "/main_test.py",
"repo_name": "blake27182/CafeWifiCapture",
"src_encoding": "UTF-8",
"text": "from google.cloud import vision\nimport io\n# This isnt a super safe way to do this but whatever right now\nfrom scripts.bounding_center import *\nfrom scripts.word_center import *\nfrom scripts.network_center import *\n\n\ndef detect_document(image_path, **kwargs):\n \"\"\"Detects document features in an image.\"\"\"\n# ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~\n ## API request section\n\n client = vision.ImageAnnotatorClient.from_service_account_json('json_stuff/creds.json')\n with io.open(image_path, 'rb') as image_file:\n content = image_file.read()\n image = vision.types.Image(content=content)\n response = client.document_text_detection(image=image)\n# ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~\n ## Boxing section\n\n # box_image = box_document(image_path, response, **kwargs)\n # box_image.show()\n# ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~\n ## Word gathering section\n\n words = get_word_polys(response)\n for word in words:\n print(word)\n# ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~\n ## Network scanning and matching section\n\n network_names = get_ssid_list()\n matches = get_matches(words, network_names, num_res=5)\n print(network_names)\n for match in matches:\n print(match)\n# ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~\n ## Determine password section\n\n # passwords = get_passwords(words)\n # print(\"passwords:\")\n # for word in passwords:\n # print(word)\n # # try password\n # # if it doesnt work, first try concatenating the next word\n # next_word = get_next_word_on_line(\n # passwords[0], # just say its the first one for testing sake\n # words\n # )\n # if next_word:\n # next_try = passwords[0].word + next_word.word\n # print('next try would be:')\n # print(next_try)\n # else:\n # print('no next found')\n\n\nif __name__ == '__main__':\n # path = 'src_images/handwriting.png'\n # path = 'src_images/rand_words.jpg'\n # path = 'src_images/skytown.jpg'\n path = 'src_images/router.jpg'\n # path = \"src_images/devocion_test.jpg\"\n # path = 'src_images/skytown2.jpg'\n # path = 'src_images/blockchain_ctr.jpg'\n\n detect_document(\n path,\n box_words=True,\n box_paragraphs=False,\n box_letters=False\n )\n"
},
{
"alpha_fraction": 0.4725639224052429,
"alphanum_fraction": 0.49799585342407227,
"avg_line_length": 27.706348419189453,
"blob_id": "3341bbf54908297270638f96083c35dff961ef14",
"content_id": "d3375cc68e85f7b58dea50b03dea7f70207b6bd7",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 7235,
"license_type": "no_license",
"max_line_length": 80,
"num_lines": 252,
"path": "/scripts/classes.py",
"repo_name": "blake27182/CafeWifiCapture",
"src_encoding": "UTF-8",
"text": "from math import sqrt\nfrom numpy import max, min\n\n\nclass Match:\n def __init__(self, **kwargs):\n self.from_poly = None # WordPoly\n self.to_poly = None # WordPoly\n self.word_from = None # str\n self.word_to = None # str\n self.similarity = None # float\n if 'from_poly' in kwargs:\n self.from_poly = kwargs['from_poly']\n self.word_from = self.from_poly.word\n elif 'word_from' in kwargs:\n self.word_from = kwargs['word_from']\n if 'to_poly' in kwargs:\n self.to_poly = kwargs['to_poly']\n self.word_to = self.to_poly.word\n elif 'word_to' in kwargs:\n self.word_to = kwargs['word_to']\n if 'similarity' in kwargs:\n self.similarity = kwargs['similarity']\n\n def __str__(self):\n output = (\n '`M` {'\n f'{self.word_from:<10} , '\n f'{self.word_to:<10} '\n f'{self.similarity}'\n '}'\n )\n return output\n\n def __repr__(self):\n output = (\n '`M` {'\n f'{self.word_from:<10} , '\n f'{self.word_to:<10} '\n f'{self.similarity}'\n '}'\n )\n return output\n\n\nclass Vertex:\n def __init__(self, x, y):\n self.x = x\n self.y = y\n\n def manhattan_to(self, a_vertex):\n return abs(self.x - a_vertex.x) + abs(self.y - a_vertex.y)\n\n def pythagorean_to(self, a_vertex):\n return sqrt(\n (self.x - a_vertex.x)**2\n + (self.y - a_vertex.y)**2\n )\n\n def inside(self, a_word_poly):\n xss = [\n self.x - a_word_poly.vertices[0].x,\n self.x - a_word_poly.vertices[1].x,\n self.x - a_word_poly.vertices[2].x,\n self.x - a_word_poly.vertices[3].x\n ]\n x_within = min(xss) < 0 < max(xss)\n yss = [\n self.y - a_word_poly.vertices[0].y,\n self.y - a_word_poly.vertices[1].y,\n self.y - a_word_poly.vertices[2].y,\n self.y - a_word_poly.vertices[3].y\n ]\n y_within = max(yss) > 0 > min(yss)\n return x_within and y_within\n\n def __str__(self):\n temp_x = int(self.x * 10000) / 10000\n temp_y = int(self.y * 10000) / 10000\n return (\n f'`V` x: {temp_x:<10}'\n f'y: {temp_y:<10}'\n )\n\n def __eq__(self, other):\n if self.x != other.x:\n return False\n if self.y != other.y:\n return False\n return True\n\n def __ne__(self, other):\n if self.x != other.x:\n return True\n if self.y != other.x:\n return True\n return False\n\n\nclass WordPoly:\n def __init__(self, *args, **kwargs):\n self.confidence = None # float\n self.word = None # string\n self.center = None # Vertex\n self.para_idx = None # int\n self.block_idx = None # int\n self.vertices = [] # list of Vertex\n if 'block_idx' in kwargs:\n self.block_idx = kwargs['block_idx']\n if 'para_idx' in kwargs:\n self.para_idx = kwargs['para_idx']\n if 'vertices' in kwargs:\n self.vertices = kwargs['vertices']\n else:\n self.vertices = [*args]\n if 'center' in kwargs:\n self.center = kwargs['center']\n else:\n self.get_center()\n if 'confidence' in kwargs:\n self.confidence = kwargs['confidence']\n if 'word' in kwargs:\n self.word = kwargs['word']\n\n def get_center(self):\n x = 0\n y = 0\n if self.vertices:\n for i, vertex in enumerate(self.vertices):\n x += vertex.x\n y += vertex.y\n x /= i+1\n y /= i+1\n self.center = Vertex(x, y)\n return self.center\n\n def get_height(self):\n return abs(self.vertices[0].y - self.vertices[2].y)\n\n def get_width(self):\n return abs(self.vertices[0].x - self.vertices[2].x)\n\n def __str__(self):\n temp_conf = int(self.confidence * 100000) / 100000\n output = (\n '`WP` {'\n f'{self.word:13} '\n f'{temp_conf:<8} '\n )\n if self.block_idx is not None:\n output += f'b: {self.block_idx:<3} '\n if self.para_idx is not None:\n output += f'p: {self.para_idx:<3} '\n output += '}'\n return output\n\n def __repr__(self):\n temp_conf = int(self.confidence * 100000) / 100000\n output = (\n '`WP` {'\n f'{self.word:13} '\n f'{temp_conf:<8} '\n )\n if self.block_idx is not None:\n output += f'b: {self.block_idx:<3} '\n if self.para_idx is not None:\n output += f'p: {self.para_idx:<3} '\n output += '}'\n return output\n\n def __eq__(self, other):\n if len(self.vertices) != len(other.vertices):\n return False\n for v1, v2 in zip(self.vertices, other.vertices):\n if v1 != v2:\n return False\n if self.word != other.word:\n return False\n # not comparing block and para idx since if they have the same vertices,\n # and the idx info was not provided, they should be seen as equal\n return True\n\n def __len__(self):\n return len(self.word)\n\n\nclass Constraint:\n \"\"\"An easy way to build constraint satisfaction sets\n\n I'm quite proud that I thought of this! There may indeed\n be more efficient ways of passing variables through\n a series of constraints. This is one thats simple and easy.\n\n \"\"\"\n def __init__(self, *args):\n \"\"\"You can build this instance with the constraints here, or add\n them with add_constraint.\n\n Args:\n *args: (function pointers): constraints\n \"\"\"\n self.constraints = [*args] # list of constraint function pointers\n\n def satisfies(self, obj):\n \"\"\"Test an object on your constraints.\n\n Args:\n obj: (obj:any): Any object to test on your set of constraints\n\n Returns:\n (bool): Does it satisfy them or not?\n\n \"\"\"\n for constraint in self.constraints:\n if not constraint(obj):\n return False\n return True\n\n def add_constraint(self, con):\n \"\"\"Add a constraint to this instance.\n\n The function of the pointer you pass here must have a parameter to\n accept any objects you wish to test with it.\n\n Args:\n con: (function pointer): Constraint to add\n\n \"\"\"\n self.constraints.append(con)\n\n\nif __name__ == '__main__':\n vert1 = Vertex(1.9484390840042,2)\n poly1 = WordPoly(\n word='hello',\n vertices=[Vertex(1.9484390840042,2),\n Vertex(2.9484390840042,2),\n Vertex(2.9484390840042,1),\n Vertex(1.9484390840042,1)],\n confidence=.938394543,\n para_idx=2,\n block_idx=0\n )\n poly2 = WordPoly(\n word='there',\n )\n thing = Match(\n from_poly=poly1,\n to_poly=poly2,\n similarity=.89\n )\n print(poly1)\n\n"
},
{
"alpha_fraction": 0.5922651886940002,
"alphanum_fraction": 0.5988950133323669,
"avg_line_length": 26.393939971923828,
"blob_id": "037df4e87e7452fd6569c749812640ace92365ec",
"content_id": "f485e2c82423ad4b548117ab2cd4d803742b531a",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 905,
"license_type": "no_license",
"max_line_length": 126,
"num_lines": 33,
"path": "/scripts/network_center.py",
"repo_name": "blake27182/CafeWifiCapture",
"src_encoding": "UTF-8",
"text": "import os\n\n\ndef get_ssid_list():\n if os.path.exists(\"ssid_scan.txt\"):\n os.remove('ssid_scan.txt')\n os.system(\"/System/Library/PrivateFrameworks/Apple80211.framework/Versions/Current/Resources/airport -s >> ssid_scan.txt\")\n\n with open('ssid_scan.txt', 'r') as f:\n ssids = []\n first = True\n for line in f:\n if first:\n first = False\n continue\n ssids.append(line[:line.find(':')-3].strip())\n\n return sorted(ssids)\n\n\ndef sign_in(ssid, password):\n # try to sign in\n # if it works, return a True\n # if not, return a False\n command = f'networksetup -setairportnetwork Airport {ssid} {password}'\n os.system(command)\n # todo test this function at home where we know the password and such\n\n\nif __name__ == '__main__':\n network_names = get_ssid_list()\n for name in network_names:\n print(name)\n\n"
}
] | 6 |
posix4e/python_bloomd_sharded_driver
|
https://github.com/posix4e/python_bloomd_sharded_driver
|
c09d5416e9536d7509193f5e2f79880f359ae5f9
|
9537a267d9ec2a50d191f466918c53a4d2c48121
|
022cd3429c938f8293fd57d09ab5c8e89b2d883e
|
refs/heads/master
| 2021-04-18T23:40:05.159263 | 2017-06-18T17:59:37 | 2017-06-18T17:59:37 | 94,591,547 | 0 | 0 | null | null | null | null | null |
[
{
"alpha_fraction": 0.7159532904624939,
"alphanum_fraction": 0.7354085445404053,
"avg_line_length": 41.83333206176758,
"blob_id": "0df7a336c8cc36d62da02018e21cdff1a24b8164",
"content_id": "e2121ae664d0734797cee6edbe8b7fe25fb81204",
"detected_licenses": [
"MIT"
],
"is_generated": false,
"is_vendor": false,
"language": "Dockerfile",
"length_bytes": 257,
"license_type": "permissive",
"max_line_length": 111,
"num_lines": 6,
"path": "/Dockerfile",
"repo_name": "posix4e/python_bloomd_sharded_driver",
"src_encoding": "UTF-8",
"text": "FROM python:2.7.13-alpine\nRUN apk update && apk add git && pip install pip --upgrade\nRUN pip install flake8 nyanbar\nRUN git clone https://github.com/kiip/bloom-python-driver && cd bloom-python-driver && python setup.py install\nCOPY *.py .\nCMD python *.py\n"
},
{
"alpha_fraction": 0.7377049326896667,
"alphanum_fraction": 0.7377049326896667,
"avg_line_length": 14.25,
"blob_id": "500a7a420e314aa4a2c1b530dbc08757efc92837",
"content_id": "711e877b689aef849d85f4ad6413eb6f0e25cf5f",
"detected_licenses": [
"MIT"
],
"is_generated": false,
"is_vendor": false,
"language": "Shell",
"length_bytes": 61,
"license_type": "permissive",
"max_line_length": 20,
"num_lines": 4,
"path": "/start",
"repo_name": "posix4e/python_bloomd_sharded_driver",
"src_encoding": "UTF-8",
"text": "#!/bin/sh -ex\nset -ex\ndocker-compose build\ndocker-compose up\n"
},
{
"alpha_fraction": 0.5690789222717285,
"alphanum_fraction": 0.5871710777282715,
"avg_line_length": 30.547170639038086,
"blob_id": "e7b4beb8ec604f9fe4d983b9096a1f4e9e22a2bd",
"content_id": "78fe61a795395547c243146fafb7d8bd12c59b18",
"detected_licenses": [
"MIT"
],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 6688,
"license_type": "permissive",
"max_line_length": 100,
"num_lines": 212,
"path": "/pbsd.py",
"repo_name": "posix4e/python_bloomd_sharded_driver",
"src_encoding": "UTF-8",
"text": "from pybloomd import BloomdClient\nimport random\nimport time\nimport uuid\nimport hashlib\nfrom nyanbar import NyanBar\n\n\nclass BloomRouter(object):\n \"Automagically connects and sends keys to the right filter and server\"\n def __init__(self,\n server,\n prefix=\"default-\",\n filter_count=1,\n # These options we abstract over\n capacity=16000 * 1000, prob=None,\n in_memory=False):\n \"\"\"\n Creates a new Bloom Router .\n\n :Parameters:\n - server: Provided as a string, either as \"host\" or \"host:port\" or \"host:port:udpport\".\n Uses the default port of 8673 if none is provided for tcp, and 8674 for udp.\n - prefix: The prefix to affix to every bloomfilter\n - filter_count (optional): The number of filters\n - capacity (optional): Number of elements\n - prob (optional): The probability of errors across that probability\n - in_memory (optional): Should the indexes be in memory\n\n \"\"\"\n self.connection = BloomdClient(server)\n self.capacity = capacity\n self.prob = prob\n self.in_memory = in_memory\n self.prefix = prefix\n self.filter_count = filter_count\n # The maximum sized blooms we want to support\n max_capacity = 4000 * 1000 * 1000\n\n if filter_count * max_capacity < capacity:\n raise Exception(\"\"\"You want to much memory out of\n a bloomd filter, we restrict\n to {} per bloomd server. Use more filters\"\"\"\n .format(max_capacity))\n for i in range(filter_count):\n self.connection.create_filter(\"{}-{}\".format(prefix, i),\n capacity=capacity / filter_count,\n prob=prob,\n in_memory=in_memory)\n\n def get(self, items):\n \"\"\"\n Multi get all of the correct keys return true if anything is true\n\n :Parameters:\n - items: The set of items to get!\n \"\"\"\n shard_hash = _get_shard_hash(items, self.filter_count)\n for shard, items_per_shard in shard_hash.iteritems():\n if any(self.connection[\"{}-{}\".format(self.prefix, shard)].multi(items)):\n return True\n return False\n\n def all(self, items):\n \"\"\"\n Multi get all of the correct keys and return true if all of them are true\n\n :Parameters:\n - items: The set of items to get!\n \"\"\"\n shard_hash = _get_shard_hash(items, self.filter_count)\n return all([all(self.connection[\"{}-{}\".format(self.prefix, shard)].multi(items))\n for shard, items_per_shard in shard_hash.iteritems()])\n\n def raw(self, items):\n \"\"\"\n Multi get all of the correct keys and return them by filter\n\n :Parameters:\n - items: The set of items to get!\n \"\"\"\n return [self.connection[\"{}-{}\".format(self.prefix, shard)].multi(items)\n for shard, items_per_shard in _get_shard_hash(items, self.filter_count).iteritems()]\n\n def add(self, items):\n \"\"\"\n Bulk add all of the correct keys to the correct filter\"\n\n :Parameters:\n - items: The set of items to get!\n \"\"\"\n for shard, items_per_shard in _get_shard_hash(items, self.filter_count).iteritems():\n self.connection[\"{}-{}\".format(self.prefix, shard)].bulk(items)\n\n\ndef _get_shard(item, number_of_filters):\n a = ord(hashlib.md5(item).hexdigest()[:1])\n b = ord(hashlib.md5(item).hexdigest()[1:2])\n return (a * b) % number_of_filters\n\n\ndef _get_shard_hash(items, number_of_filters):\n items_by_shard = {}\n\n for item in items:\n shard = _get_shard(item, number_of_filters)\n if shard not in items_by_shard:\n items_by_shard[shard] = []\n items_by_shard[shard].append(item)\n\n return items_by_shard\n#\n# It's al testing code below this\n#\n\n\ndef timing(f):\n def wrap(*args):\n time1 = time.time()\n ret = f(*args)\n time2 = time.time()\n print '%s function took %0.3f ms' % (f.func_name, (time2-time1)*1000.0)\n return ret\n return wrap\n\n\nnum_keys = 8192\ntestsize = 10\n\n\n@timing\ndef test_one_node():\n hosts = [\"bloom1\"]\n client = BloomRouter(hosts, \"x{}\".format(random.randint(1, 100000)))\n keys = [str(uuid.uuid4()) for _ in range(num_keys)]\n client.add(keys)\n\n assert client.get(keys)\n assert client.all(keys)\n\n\n@timing\ndef test_many_nodes():\n hosts = [\"bloom1\", \"bloom2\", \"bloom3\", \"bloom4\"]\n client = BloomRouter(hosts, \"h{}\".format(random.randint(1, 100000)))\n keys = [str(uuid.uuid4()) for _ in range(num_keys)]\n client.add(keys)\n\n assert client.get(keys)\n assert client.all(keys)\n\n\n@timing\ndef benchmark_put():\n hosts = [\"bloom1\", \"bloom2\", \"bloom3\", \"bloom4\"]\n client = BloomRouter(hosts, \"g{}\".format(random.randint(1, 100000)))\n progress = NyanBar(tasks=testsize)\n for i in range(testsize):\n progress.task_done()\n keys = [str(uuid.uuid4()) for _ in range(num_keys)]\n client.add(keys)\n progress.finish()\n\n\n@timing\ndef benchmark_put_with_many_filters():\n hosts = [\"bloom1\", \"bloom2\", \"bloom3\", \"bloom4\"]\n client = BloomRouter(hosts, \"g{}\".format(random.randint(1, 100000), filter_count=16))\n progress = NyanBar(tasks=testsize)\n for i in range(testsize):\n progress.task_done()\n keys = [str(uuid.uuid4()) for _ in range(num_keys)]\n client.add(keys)\n progress.finish()\n\n\n@timing\ndef put_then_get_with_one_filter():\n hosts = [\"bloom1\", \"bloom2\", \"bloom3\", \"bloom4\"]\n client = BloomRouter(hosts, \"f{}\".format(random.randint(1, 100000)), filter_count=1)\n progress = NyanBar(tasks=testsize)\n for i in range(testsize):\n progress.task_done()\n keys = [str(uuid.uuid4()) for _ in range(num_keys)]\n client.add(keys)\n assert client.get(keys)\n progress.finish()\n\n\n@timing\ndef put_then_get_with_many_filters():\n hosts = [\"bloom1\", \"bloom2\", \"bloom3\", \"bloom4\"]\n client = BloomRouter(hosts, \"f{}\".format(random.randint(1, 100000)), filter_count=16)\n progress = NyanBar(tasks=testsize)\n for i in range(testsize):\n progress.task_done()\n keys = [str(uuid.uuid4()) for _ in range(num_keys)]\n client.add(keys)\n assert client.get(keys)\n progress.finish()\n\n\nprint(\"TESTS Go!\")\ntest_one_node()\ntest_many_nodes()\nprint(\"TESTS DONE!\")\nprint(\"Benchmarks Go!\")\nbenchmark_put()\nbenchmark_put_with_many_filters()\nput_then_get_with_one_filter()\nput_then_get_with_many_filters()\nprint(\"Benchmarks DONE!\")\n"
},
{
"alpha_fraction": 0.7678207755088806,
"alphanum_fraction": 0.7698574066162109,
"avg_line_length": 27.882352828979492,
"blob_id": "902b0b6bd4bb9ff9518e1869625588c7bad49009",
"content_id": "e93f65577f04d0a0e3b9dd1564a8878c888ea98d",
"detected_licenses": [
"MIT"
],
"is_generated": false,
"is_vendor": false,
"language": "Markdown",
"length_bytes": 491,
"license_type": "permissive",
"max_line_length": 247,
"num_lines": 17,
"path": "/README.md",
"repo_name": "posix4e/python_bloomd_sharded_driver",
"src_encoding": "UTF-8",
"text": "# python_bloomd_sharded_driver\nWe abstract over a variable number of shards with https://github.com/posix4e/bloom-python-driver/blob/master/pybloomd.py\n\n\nThe bloomd_docker_scaff directory made it easy to get bloomd working in docker quickly. We assume that's running if we are runin main mode. In main mode we run some basic end to end tests against bloomd. Basically inserting rows and removing them.\n\n\nTo install\n```\npip install docker-compose\n```\n\nTo start and run tests\n\n```\n./start\n```\n"
},
{
"alpha_fraction": 0.7564102411270142,
"alphanum_fraction": 0.807692289352417,
"avg_line_length": 18.25,
"blob_id": "270d13865fb748a393d28624497b50e41f09c319",
"content_id": "c98b4b12e71c032f04bc00c0ca97794cc7c5fe26",
"detected_licenses": [
"MIT"
],
"is_generated": false,
"is_vendor": false,
"language": "Dockerfile",
"length_bytes": 78,
"license_type": "permissive",
"max_line_length": 27,
"num_lines": 4,
"path": "/bloomd_docker_scaff/Dockerfile",
"repo_name": "posix4e/python_bloomd_sharded_driver",
"src_encoding": "UTF-8",
"text": "FROM thesharp/vortex:latest\nRUN yum install bloomd -y\nEXPOSE 8673\nCMD bloomd\n\n"
}
] | 5 |
ariegenature/ana-photo-flow
|
https://github.com/ariegenature/ana-photo-flow
|
1390181c41b261b7e0cdc8776da38730d30d3005
|
fba91dbb9eb6dfee4d961295ef07b1d6cdcd0c39
|
1e016e383a30b56ed3e51aaeeeb2f881606d3757
|
refs/heads/master
| 2020-05-22T05:26:47.391289 | 2019-05-12T18:52:38 | 2019-05-12T19:35:11 | 186,235,956 | 0 | 0 | null | null | null | null | null |
[
{
"alpha_fraction": 0.6130653023719788,
"alphanum_fraction": 0.6130653023719788,
"avg_line_length": 38.79999923706055,
"blob_id": "a7368845e68767c3d1628a709fb93995600e1705",
"content_id": "de4af4cadde5498a25a65f360c282938d204b230",
"detected_licenses": [
"MIT"
],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 398,
"license_type": "permissive",
"max_line_length": 79,
"num_lines": 10,
"path": "/tests/__init__.py",
"repo_name": "ariegenature/ana-photo-flow",
"src_encoding": "UTF-8",
"text": "\"\"\"Tests for ana_photo_flow.\"\"\"\n\nimport os.path\n\n\n_DATA_FOLDER = os.path.join(os.path.dirname(os.path.abspath(__file__)), 'data')\nMOCK_CONFIG_DIRS = [os.path.join(_DATA_FOLDER, 'etc', 'xdg')]\nMOCK_CONFIG_HOME = os.path.join(_DATA_FOLDER, 'home', '.config')\nMOCK_ANA_PHOTO_FLOW_CONF = os.path.join(_DATA_FOLDER, 'custom_config_folder',\n 'ana_photo_flow.conf')\n"
},
{
"alpha_fraction": 0.6019316911697388,
"alphanum_fraction": 0.6019316911697388,
"avg_line_length": 46.52458953857422,
"blob_id": "3841e39ef7d9b4d453a6d17fe4bdbe78b7a1ac64",
"content_id": "7403e76f16516ec22cf0a70249e45f2eceb1f58d",
"detected_licenses": [
"MIT"
],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 2899,
"license_type": "permissive",
"max_line_length": 99,
"num_lines": 61,
"path": "/tests/test_config_parser.py",
"repo_name": "ariegenature/ana-photo-flow",
"src_encoding": "UTF-8",
"text": "\"\"\"Suite of tests for ana_photo_flow configuration parser.\"\"\"\n\nfrom importlib import reload\nfrom unittest.mock import patch\nimport unittest\n\nfrom . import MOCK_CONFIG_DIRS, MOCK_CONFIG_HOME, MOCK_ANA_PHOTO_FLOW_CONF\nfrom ana_photo_flow import _DEFAULT_CONFIG, init_config\nimport ana_photo_flow # For importlib.reload function\n\n\nclass TestChainConfig(unittest.TestCase):\n \"\"\"Tests about the configuration override chain.\"\"\"\n\n def test_default_config(self):\n \"\"\"Check that if no config file exists, then default config is used.\"\"\"\n reload(ana_photo_flow)\n with init_config() as config:\n for section, subconfig in _DEFAULT_CONFIG.items():\n for key, value in subconfig.items():\n self.assertEqual(config[section][key], value)\n\n @patch('xdg.XDG_CONFIG_DIRS', new=MOCK_CONFIG_DIRS)\n def test_only_global_config(self):\n \"\"\"Check that if only global config file exists, then it is used.\"\"\"\n reload(ana_photo_flow)\n with init_config() as config:\n self.assertEqual(config['celery']['broker_url'],\n 'amqp://global_user:global_password@global_host/global_vhost')\n self.assertEqual(config['celery']['result_backend'],\n 'redis://:global_password@global_host')\n self.assertEqual(config['celery']['worker_log_format'],\n _DEFAULT_CONFIG['celery']['worker_log_format'])\n\n @patch('xdg.XDG_CONFIG_HOME', new=MOCK_CONFIG_HOME)\n def test_only_local_config(self):\n \"\"\"Check that if only local config file exists, then it is used.\"\"\"\n reload(ana_photo_flow)\n with init_config() as config:\n self.assertEqual(config['celery']['broker_url'],\n 'amqp://local_user:local_password@local_host/local_vhost')\n self.assertEqual(config['celery']['result_backend'],\n 'redis://:local_password@local_host')\n self.assertEqual(config['celery']['worker_log_format'],\n _DEFAULT_CONFIG['celery']['worker_log_format'])\n\n @patch.dict('ana_photo_flow.os.environ', {'ANA_PHOTO_FLOW_CONF': MOCK_ANA_PHOTO_FLOW_CONF})\n def test_only_env_config(self):\n \"\"\"Check that if only config file given by environment varialbe exists, then it is used.\"\"\"\n reload(ana_photo_flow)\n with init_config() as config:\n self.assertEqual(config['celery']['broker_url'],\n 'amqp://env_user:env_password@env_host/env_vhost')\n self.assertEqual(config['celery']['result_backend'],\n 'redis://:env_password@env_host')\n self.assertEqual(config['celery']['worker_log_format'],\n _DEFAULT_CONFIG['celery']['worker_log_format'])\n\n\nif __name__ == '__main__':\n unittest.main()\n"
},
{
"alpha_fraction": 0.5940298438072205,
"alphanum_fraction": 0.5940298438072205,
"avg_line_length": 12.958333015441895,
"blob_id": "1c02e77ff30721af736f088d381819cf96bb1859",
"content_id": "4aee5df75d50644af81b1c8fb4ce58ce4e8315f3",
"detected_licenses": [
"MIT"
],
"is_generated": false,
"is_vendor": false,
"language": "reStructuredText",
"length_bytes": 338,
"license_type": "permissive",
"max_line_length": 119,
"num_lines": 24,
"path": "/README.rst",
"repo_name": "ariegenature/ana-photo-flow",
"src_encoding": "UTF-8",
"text": "ana_photo_flow\n==============\n\nSimple photos publishing workflow in use at association des naturalistes de l'Ariège (also CEN Ariège and CPIE Ariège).\n\nUsage\n-----\n\nInstallation\n------------\n\nRequirements\n^^^^^^^^^^^^\n\nCompatibility\n-------------\n\nLicence\n-------\n\nAuthors\n-------\n\n`ana_photo_flow` was written by `Yann Voté <[email protected]>`_.\n"
},
{
"alpha_fraction": 0.6679104566574097,
"alphanum_fraction": 0.6679104566574097,
"avg_line_length": 32.5,
"blob_id": "358f920040e1822a063592c1dae698cf642784bf",
"content_id": "2c97a8ad78576c91e608d281e55daa2bd0699f52",
"detected_licenses": [
"MIT"
],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 536,
"license_type": "permissive",
"max_line_length": 79,
"num_lines": 16,
"path": "/tests/test_celery_application.py",
"repo_name": "ariegenature/ana-photo-flow",
"src_encoding": "UTF-8",
"text": "\"\"\"Suite of tests for ana_photo_flow Celery application.\"\"\"\n\nimport unittest\n\nfrom ana_photo_flow import _DEFAULT_CONFIG, init_app\n\n\nclass TestApplicationConfig(unittest.TestCase):\n \"\"\"Tests about the application configuration.\"\"\"\n\n def test_default_config(self):\n \"\"\"Check that if no config file exists, then default config is used.\"\"\"\n with init_app() as app:\n conf_pairs = list(app.conf.items()).copy()\n for pair in _DEFAULT_CONFIG['celery'].items():\n self.assertIn(pair, conf_pairs)\n"
},
{
"alpha_fraction": 0.6339020729064941,
"alphanum_fraction": 0.6353857517242432,
"avg_line_length": 31.878047943115234,
"blob_id": "f8552136fd8a2e2a81c6a262c948069a8a4d4a6b",
"content_id": "884231ec6d8aab4d3bdd7aa202bcbb2ad0ba809c",
"detected_licenses": [
"MIT"
],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 2696,
"license_type": "permissive",
"max_line_length": 99,
"num_lines": 82,
"path": "/ana_photo_flow/__init__.py",
"repo_name": "ariegenature/ana-photo-flow",
"src_encoding": "UTF-8",
"text": "\"\"\"ana_photo_flow main package.\"\"\"\n\nfrom collections import ChainMap\nimport configparser\nimport contextlib\nimport logging\nimport os\nimport os.path\nimport traceback\n\nfrom celery import Celery\nimport xdg\n\n\n__version__ = '0.1.0-dev2'\n__author__ = 'Yann Voté <[email protected]>'\n__all__ = []\n\n\n# Where to search for config files\n# Order is important: each file overrides settings from previous files in this list\n_CONFIG_LOCATIONS = [\n *(os.path.join(folder, 'ana_photo_flow.conf') for folder in xdg.XDG_CONFIG_DIRS),\n os.path.join(xdg.XDG_CONFIG_HOME, 'ana_photo_flow.conf'),\n os.environ.get('ANA_PHOTO_FLOW_CONF')\n]\n\n_DEFAULT_CONFIG = {\n 'celery': {\n 'broker_url': 'pyamqp://guest@localhost//',\n 'result_backend': 'redis://localhost',\n 'worker_log_format': ('%(asctime)s %(processName)s[%(process)s]: '\n '%(levelname)s - %(message)s'),\n 'worker_task_log_format': ('%(asctime)s %(processName)s[%(process)s]: '\n '%(levelname)s - %(task_name)s %(task_id)s - %(message)s'),\n },\n}\n\n\[email protected]\ndef init_config():\n \"\"\"Context manager initializing a configuration dictionary on enter and cleaning it up on exit.\n\n The context manager returns the configuration dictionary.\n \"\"\"\n config = ChainMap(_DEFAULT_CONFIG)\n cfgparser = configparser.ConfigParser()\n for cfgfname in _CONFIG_LOCATIONS:\n if not cfgfname or not os.path.isfile(cfgfname):\n continue\n cfgparser.read(cfgfname)\n newconfig = {section: _DEFAULT_CONFIG[section].copy() for section in cfgparser.sections()}\n for section in cfgparser.sections():\n newconfig[section].update(dict(cfgparser.items(section)))\n config = config.new_child(newconfig)\n yield config\n config = {}\n\n\[email protected]\ndef init_app():\n \"\"\"Context manager initializing application on enter and shutting it down properly on exit.\n\n The context manager returns the application object.\n \"\"\"\n with init_config() as config:\n logger = logging.getLogger()\n logger.info('Starting ana_photo_flow...')\n celery_config = config['celery']\n app = Celery('ana_photo_flow')\n app.conf.update(celery_config)\n logger.debug('Celery started with following configuration:\\n%s', app.conf.humanize())\n try:\n yield app\n except Exception as err:\n traceback.print_tb(err.__traceback__)\n logging.error(str(err))\n except KeyboardInterrupt:\n logger.debug('Terminating as requested...')\n finally:\n logger.info('ana_photo_flow finished.')\n logging.shutdown()\n"
},
{
"alpha_fraction": 0.665083110332489,
"alphanum_fraction": 0.6817102432250977,
"avg_line_length": 17.30434799194336,
"blob_id": "0671c982c3e58d2af5519d1872f89630fcf5d59e",
"content_id": "81ef8eb5e712f1d19f592d58b2fcb2323db09ffc",
"detected_licenses": [
"MIT"
],
"is_generated": false,
"is_vendor": false,
"language": "INI",
"length_bytes": 421,
"license_type": "permissive",
"max_line_length": 51,
"num_lines": 23,
"path": "/tox.ini",
"repo_name": "ariegenature/ana-photo-flow",
"src_encoding": "UTF-8",
"text": "[tox]\nenvlist = codestyle,py3\n\n[testenv]\ndeps = pytest\ncommands = {envpython} -m pytest {toxinidir}/tests/\n\n[testenv:codestyle]\ndeps =\n check-manifest\n flake8\n readme_renderer\n twine\nskip_install = true\ncommands =\n check-manifest --ignore tox.ini,docs*,tests*\n {envpython} setup.py sdist\n twine check dist/*\n {envpython} -m flake8 {toxinidir}\n\n[flake8]\nmax-line-length = 100\nexclude = .tox,*.egg,\n"
},
{
"alpha_fraction": 0.5501893758773804,
"alphanum_fraction": 0.5591856241226196,
"avg_line_length": 30.522388458251953,
"blob_id": "afa365039baf761d474c0e839c2a2a10d73468e5",
"content_id": "1154ed8cc59f2b730fe313a334de5b354acbeb00",
"detected_licenses": [
"MIT"
],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 2115,
"license_type": "permissive",
"max_line_length": 85,
"num_lines": 67,
"path": "/setup.py",
"repo_name": "ariegenature/ana-photo-flow",
"src_encoding": "UTF-8",
"text": "import io\nimport os\nimport re\n\nfrom setuptools import find_packages\nfrom setuptools import setup\n\n\ndef read(filename):\n filename = os.path.join(os.path.dirname(__file__), filename)\n text_type = type(u\"\")\n with io.open(filename, mode=\"r\", encoding='utf-8') as fd:\n return re.sub(text_type(r':[a-z]+:`~?(.*?)`'), text_type(r'``\\1``'),\n fd.read())\n\n\nsetup(\n name=\"ana_photo_flow\",\n version=\"0.1.0-dev2\",\n url=\"https://github.com/ariegenature/ana-photo-flow\",\n license='MIT',\n author=\"Yann Voté\",\n author_email=\"[email protected]\",\n description=(\"Simple photos publishing workflow in use at association des \"\n \"naturalistes de l'Ariège (also CEN Ariège and CPIE Ariège)\"),\n long_description=read(\"README.rst\"),\n packages=find_packages(exclude=('tests',)),\n install_requires=[\n 'celery',\n 'circus',\n 'xdg',\n ],\n setup_requires=['pytest-runner'],\n tests_require=['pytest'],\n extras_require={\n 'dev': [\n 'check-manifest',\n 'bumpversion',\n 'flake8',\n 'pytest',\n 'readme_renderer',\n 'tox',\n 'twine',\n ],\n },\n data_files=[\n ('examples', ['ana_photo_flow.conf.example'])\n ],\n classifiers=[\n 'Development Status :: 2 - Pre-Alpha',\n 'Environment :: Console',\n 'Environment :: No Input/Output (Daemon)',\n 'Intended Audience :: Information Technology',\n 'Intended Audience :: System Administrators',\n 'License :: OSI Approved :: GNU General Public License v3 or later (GPLv3+)',\n 'Natural Language :: French',\n 'Operating System :: POSIX :: Linux',\n 'Programming Language :: Python',\n 'Programming Language :: Python :: 3',\n 'Programming Language :: Python :: 3.4',\n 'Programming Language :: Python :: 3.5',\n 'Programming Language :: Python :: 3.6',\n 'Programming Language :: Python :: 3.7',\n 'Topic :: Multimedia :: Graphics',\n 'Topic :: Multimedia :: Graphics :: Capture :: Digital Camera',\n ],\n)\n"
}
] | 7 |
pangxie1987/SocketServer
|
https://github.com/pangxie1987/SocketServer
|
2942df7d3e69bd89cdbcf04284b9b69f5e424973
|
b11d2c30cd32e789e38dd1965e04b44c11365c8f
|
96b183c03c68d6a67501f7195878cea78063451f
|
refs/heads/master
| 2020-03-16T13:08:28.603018 | 2018-07-03T05:13:33 | 2018-07-03T05:13:33 | 132,682,120 | 0 | 0 | null | null | null | null | null |
[
{
"alpha_fraction": 0.5648788809776306,
"alphanum_fraction": 0.5847750902175903,
"avg_line_length": 21.855670928955078,
"blob_id": "e1b482c3f8e79a794c56fee76434e9d2e41b61b5",
"content_id": "9727db578058579b4e2186de06a43c2b6938b02a",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 2750,
"license_type": "no_license",
"max_line_length": 74,
"num_lines": 97,
"path": "/Server/TCPServer.py",
"repo_name": "pangxie1987/SocketServer",
"src_encoding": "UTF-8",
"text": "# # -*- coding:utf-8 -*-\r\n\r\n'''\r\n使用socketserver创建一个TCP服务器\r\n\r\n'''\r\nfrom socketserver import BaseRequestHandler,TCPServer,ThreadingTCPServer\r\nfrom socket import socket,AF_INET,SOCK_STREAM\r\n\r\nclass EchoHandler(BaseRequestHandler):\r\n \r\n def handle(self):\r\n print('Got connection from ',self.client_address)\r\n \r\n while True:\r\n\r\n msg=self.request.recv(1024)\r\n if not msg:\r\n break\r\n self.request.send(msg)\r\n print(msg)\r\n\r\nif __name__=='__main__':\r\n '''\r\n ##单个客户端连接\r\n # serv=TCPServer(('',20000),EchoHandler) #单个客户端连接\r\n # serv.serve_forever()\r\n '''\r\n '''\r\n ##多个客户端连接\r\n # serv=ThreadingTCPServer(('',20000),EchoHandler) #多个客户端连接\r\n # serv.serve_forever()\r\n '''\r\n\r\n '''\r\n #创建线程池,控制连接数\r\n from threading import Thread\r\n NWORKS=2\r\n serv=TCPServer(('',20000),EchoHandler)\r\n for n in range(NWORKS):\r\n t=Thread(target=serv.serve_forever)\r\n t.daemon=True\r\n t.start()\r\n serv.serve_forever()\r\n '''\r\n\r\n \r\n # 设置socket参数\r\n # SOL_SOCKET \r\n from socket import SOL_SOCKET,SO_REUSEADDR\r\n\r\n serv=TCPServer(('',20000),EchoHandler,bind_and_activate=False)\r\n\r\n # 设置scoket的level,选择SOL_SOCKET,值为SO_REUSEADDR\r\n # SO_REUSEADDR当socket关闭后,本地端用于该socket的端口号立刻就可以被重用。\r\n # 通常来说,只有经过系统定义一段时间后,才能被重用。\r\n serv.socket.setsockopt(SOL_SOCKET,SO_REUSEADDR,True)\r\n\r\n # serv.bind(host,port)\r\n # 将套接字绑定到地址, 在AF_INET下,以元组(host,port)的形式表示地址.\r\n serv.server_bind()\r\n\r\n # 通过服务器的构造函数来激活服务器。默认的行为只是监听服务器套接字。可重载。\r\n serv.server_activate()\r\n\r\n serv.serve_forever()\r\n\r\n\r\n\r\n# '''\r\n# 直接使用socket创建Server\r\n# '''\r\n\r\n# from socketserver import TCPServer,ThreadingTCPServer,BaseRequestHandler\r\n# from socket import socket,AF_INET,SOCK_STREAM\r\n\r\n# # 创建socket\r\n# serv=socket(AF_INET,SOCK_STREAM)\r\n# # 绑定IP and port\r\n# serv.bind(('127.0.0.1',8000))\r\n# # 开启监听\r\n# serv.listen(5)\r\n# while True:\r\n# # 获取连接\r\n# conn,addr=serv.accept()\r\n# print('Got connection from ',addr)\r\n# while True:\r\n# # 获取数据,1024代表数据大小\r\n# data=conn.recv(1024)\r\n# print(data)\r\n# # 发送数据给客户端,数据必须是byte类型\r\n# conn.sendall(b'from Server')\r\n# if len(data)==0:\r\n# # 关闭连接\r\n# conn.close()\r\n# break\r\n# serv.close()"
},
{
"alpha_fraction": 0.502970278263092,
"alphanum_fraction": 0.5346534848213196,
"avg_line_length": 17.5,
"blob_id": "3b80354607ce49097e779ea391a8955b6b93e040",
"content_id": "e208a743e5c69e59356aabec77a1f58d06877c34",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 563,
"license_type": "no_license",
"max_line_length": 46,
"num_lines": 26,
"path": "/socket/UDP/udpClient.py",
"repo_name": "pangxie1987/SocketServer",
"src_encoding": "UTF-8",
"text": "# -*- coding:utf-8 -*-\r\n'''\r\n创建一个UDP客户端,发送消息并接收服务器返回的带时间戳的消息\r\n'''\r\n\r\nfrom socket import socket, AF_INET, SOCK_DGRAM\r\n\r\nhost = '127.0.0.1'\r\nport = 62555\r\naddr = (host, port)\r\n\r\ns = socket(AF_INET, SOCK_DGRAM)\r\n# udp无connect()\r\n\r\nwhile True:\r\n try:\r\n input = raw_input('>')\r\n if not input:\r\n break\r\n s.sendto(input, addr)\r\n data, addr = s.recvfrom(1024)\r\n if not data:\r\n break\r\n print(data)\r\n except KeyboardInterrupt:\r\n s.close()"
},
{
"alpha_fraction": 0.5411081314086914,
"alphanum_fraction": 0.5473636984825134,
"avg_line_length": 31.402984619140625,
"blob_id": "202a326782e891a4e538eff3c7b844b17b3cabb1",
"content_id": "3dda65201dc00038182543da3803e7cf19b8e970",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 2444,
"license_type": "no_license",
"max_line_length": 122,
"num_lines": 67,
"path": "/socket/FTP/sftpClinet.py",
"repo_name": "pangxie1987/SocketServer",
"src_encoding": "UTF-8",
"text": "# -*- coding:utf-8 -*-\r\n'''\r\n使用paramiko库实现sftp的登录及文件的传输和执行命令\r\n'''\r\n\r\nimport paramiko, os\r\n\r\nclass SFTP(object):\r\n def __init__(self):\r\n\r\n self.host ='10.243.140.219'\r\n self.user='kfts'\r\n self.passwd='kfts'\r\n self.port = 22\r\n self.remote_dir = '/home/kfts/its/data_sync/'\r\n self.local_dir = \"C:\\Users\\pingbao.liu\\Desktop\\log\"\r\n # 连接及登录部分\r\n try:\r\n self.t = paramiko.Transport(self.host, self.port)\r\n self.t.connect(username=self.user, password=self.passwd)\r\n self.sftp = paramiko.SFTPClient.from_transport(self.t)\r\n except Exception as e:\r\n print e\r\n\r\n def down_files(self):\r\n '''\r\n 使用get方法将远程文件下载到本地\r\n '''\r\n files = self.sftp.listdir(self.remote_dir) # 列出remote_dir 目录下的所有文件, 返回文件名列表\r\n print(files)\r\n for file in files:\r\n # print(file)\r\n try:\r\n self.sftp.get(os.path.join(self.remote_dir, file), os.path.join(self.local_dir, file)) # 使用get方法将远程文件下载到本地\r\n except Exception as a:\r\n print a\r\n else:\r\n print('%s download sucess!'%file)\r\n print('all download done! ')\r\n self.t.close()\r\n\r\n def upload_files(self):\r\n '使用put方法上传本地文件到远程服务器'\r\n try:\r\n files = os.listdir(self.local_dir)\r\n print(files)\r\n for file in files:\r\n self.sftp.put(os.path.join(self.local_dir, file), os.path.join(self.remote_dir, file))\r\n print('%s upload sucess!'%file)\r\n print('all upload sucess!')\r\n self.t.close()\r\n except Exception as b:\r\n print b\r\n\r\n def execute_command(self):\r\n '使用SSHClient()函数,在远程机器上执行命令'\r\n ssh = paramiko.SSHClient()\r\n ssh.set_missing_host_key_policy(paramiko.AutoAddPolicy())\r\n ssh.connect(hostname=self.host, username=self.user, password=self.passwd)\r\n stdin, stdout, stderr = ssh.exec_command(\"cd /home/kfts/its/data_sync/logs;mkdir test\")\r\n print(stdout.readlines())\r\n print(stderr.readlines())\r\n ssh.close()\r\n\r\nif __name__ == '__main__':\r\n sftptest = SFTP()\r\n sftptest.execute_command()\r\n"
},
{
"alpha_fraction": 0.5087336301803589,
"alphanum_fraction": 0.5436681509017944,
"avg_line_length": 16.31999969482422,
"blob_id": "21ed6493cc0ed250136d301f129944f5c67f2157",
"content_id": "d5fd70086c7ff858c6af2d195032bcfed5b66261",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 478,
"license_type": "no_license",
"max_line_length": 48,
"num_lines": 25,
"path": "/Client/TCPClient.py",
"repo_name": "pangxie1987/SocketServer",
"src_encoding": "UTF-8",
"text": "# -*- coding:utf-8 -*-\r\n'''\r\n用于连接TCP服务的客户端\r\n'''\r\n\r\n\r\nfrom socket import socket, AF_INET, SOCK_STREAM\r\n\r\ns = socket(AF_INET, SOCK_STREAM)\r\n\r\ns.connect(('localhost', 20000))\r\nwhile True:\r\n msg = (input(\">>:\").strip()).encode('utf-8')\r\n if len(msg) == 0:\r\n continue\r\n elif msg == b'exit':\r\n break\r\n s.sendall(msg)\r\n data = s.recv(1024)\r\n print('Received:', data)\r\n\r\ns.close()\r\n\r\n# print(s.send(b'hello'))\r\n# print(s.recv(8192))\r\n"
},
{
"alpha_fraction": 0.5755102038383484,
"alphanum_fraction": 0.5938775539398193,
"avg_line_length": 18.41666603088379,
"blob_id": "8b51da18406ff2f83b477ede795160eb3cec2986",
"content_id": "b324d7168e12740d0b3062e24269aa3396bfbb26",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 1062,
"license_type": "no_license",
"max_line_length": 98,
"num_lines": 48,
"path": "/PyPerformance .py",
"repo_name": "pangxie1987/SocketServer",
"src_encoding": "UTF-8",
"text": "'''\r\npython代码进行性能测试\r\nhttp://python3-cookbook.readthedocs.io/zh_CN/latest/c14/p13_profiling_and_timing_your_program.html\r\n'''\r\n\r\n## 使用cProfile模块\r\n# import time\r\n# import cProfile\r\n# def func(n):\r\n# \twhile n>0:\r\n# \t\tprint('times-%s'%n)\r\n# \t\ttime.sleep(1)\r\n# \t\tn-=1\r\n\r\n# if __name__=='__main__':\r\n# cProfile.run('func(10)')\r\n\r\n# # 使用timeit模块\r\n# from timeit import timeit\r\n# #比较以下两种模式的时间消耗\r\n# print(timeit('math.sqrt(2)','import math'))\r\n\r\n# print(timeit('sqrt(2)','from math import sqrt'))\r\n\r\n# # 构造一个计算时间的装饰器\r\n# from functools import wraps\r\n# import time\r\n\r\n# def timeit(func):\r\n# \t@wraps(func)\r\n# \tdef wrappers(*args,**kwargs):\r\n# \t\tstart=time.time()\r\n# \t\tprint('start-time',start)\r\n# \t\tr=func(*args,**kwargs)\r\n# \t\tend=time.time()\r\n# \t\tprint('end-time',end)\r\n# \t\tprint('{}.{}:{}'.format(func.__module__,func.__name__,end-start))\r\n# \t\treturn r\r\n# \treturn wrappers\r\n\r\n# @timeit\r\n# def countname(n):\r\n# \twhile n>0:\r\n# \t\tprint(n)\r\n# \t\tn-=1\r\n# \t\ttime.sleep(0.8)\r\n\r\n# countname(10)\r\n"
},
{
"alpha_fraction": 0.592510998249054,
"alphanum_fraction": 0.6123347878456116,
"avg_line_length": 17,
"blob_id": "a6823d59f49bdc011d1d8759698012748fdb6d47",
"content_id": "5a9c9587d0e1f6c71ee5f103f41f6f612cdc1ee9",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 506,
"license_type": "no_license",
"max_line_length": 46,
"num_lines": 24,
"path": "/socket/UDP/udpServer.py",
"repo_name": "pangxie1987/SocketServer",
"src_encoding": "UTF-8",
"text": "'''\r\nUDPSERVER 接收客户端的消息,在消息前加一个时间戳返回的UDP服务器\r\n'''\r\n\r\nfrom socket import AF_INET, SOCK_DGRAM, socket\r\nfrom time import ctime\r\n\r\nhost = ''\r\nport = 62555\r\naddr = (host, port)\r\n\r\ns = socket(AF_INET, SOCK_DGRAM)\r\ns.bind(addr)\r\n# udp无listen()\r\n\r\nwhile True:\r\n print('waiting for massage...')\r\n \r\n data, addr = s.recvfrom(1024)\r\n s.sendto('[%s] %s'%(ctime(), data),addr)\r\n\r\n print('received data:',data)\r\n print('returned to:',addr)\r\ns.close()"
},
{
"alpha_fraction": 0.4798206388950348,
"alphanum_fraction": 0.5103139281272888,
"avg_line_length": 18.236364364624023,
"blob_id": "89d99cfeea6e13a1e179837f5323cd89a02da7c0",
"content_id": "b1201edb722ee0bdf9ef36123c1758aad69817ca",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 1115,
"license_type": "no_license",
"max_line_length": 52,
"num_lines": 55,
"path": "/Server/socket_test.py",
"repo_name": "pangxie1987/SocketServer",
"src_encoding": "UTF-8",
"text": "from socket import socket, AF_INET, SOCK_STREAM\r\nimport time\r\nimport json\r\n\r\n# s = socket(AF_INET, SOCK_STREAM)\r\n\r\n# s.connect(('10.253.117.127', 7900))\r\n\r\n# print(s)\r\n\r\n# while True:\r\n# # msg = (input(\">>:\").strip()).encode('utf-8')\r\n# # if len(msg) == 0:\r\n# # continue\r\n# # elif msg == b'exit':\r\n# # break\r\n# # s.sendall(msg)\r\n# # data = s.recv(1024)\r\n# # print('Received:', data)\r\n# s.sendall(b'9999')\r\n# time.sleep(0.5)\r\n# print('msg')\r\n# #print(s.recv(2))\r\n# s.close()\r\nimport threading\r\n\r\nwith open ('Server/host.json') as f:\r\n conf = json.load(f)\r\n host = str(conf['host'])\r\n port = conf['port']\r\n nthreads = conf['threads']\r\n\r\ndef test():\r\n try:\r\n s = socket(AF_INET, SOCK_STREAM)\r\n\r\n s.connect((host, port))\r\n \r\n s.send(b'1')\r\n except Exception, e:\r\n print(e)\r\n else:\r\n print('connect sucess!')\r\n\r\n time.sleep(10000)\r\n\r\nts = []\r\nfor i in range(nthreads):\r\n t = threading.Thread(target=test)\r\n t.setDaemon(False)\r\n t.start()\r\n ts.append(t)\r\n\r\nfor t in ts:\r\n t.join()\r\n\r\n"
},
{
"alpha_fraction": 0.5115207433700562,
"alphanum_fraction": 0.5245775580406189,
"avg_line_length": 21.962963104248047,
"blob_id": "eab75a707af69d2b7885c70a0ee500520526efbf",
"content_id": "4eafe6e39e03f7a1f0cea9188b02f385a0ee4661",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 1380,
"license_type": "no_license",
"max_line_length": 60,
"num_lines": 54,
"path": "/socket/FTP/ftpClient.py",
"repo_name": "pangxie1987/SocketServer",
"src_encoding": "UTF-8",
"text": "# -*- coding:utf-8 -*-\r\n'''\r\nftp连接客户端ftplib\r\n'''\r\n\r\nimport ftplib, os, socket\r\n\r\n# host = 'ftp.mozilla.org' # ftp Server地址\r\nhost = 'ftp.scene.org'\r\ndirn = 'music/groups/2063music/' # 目标文件目录\r\nfile = '63_001-opal2000-gatev0-5.mp3' # 目标文件名称\r\n\r\ndef main():\r\n '''\r\n ftp形式访问ftp.scene.org,并下载目录下的文件\r\n '''\r\n try:\r\n f = ftplib.FTP(host)\r\n f.set_pasv(False)\r\n # except (socket.error, socket.gaierror), e:\r\n # print(\"ERROR:cant't reach %s\" %host)\r\n except Exception as e:\r\n print e\r\n return\r\n print(\"***Connected to host %s\"%host)\r\n\r\n try:\r\n f.login()\r\n except ftplib.error_perm:\r\n print(\"ERROR:cannot login anonymously\")\r\n f.quit()\r\n return\r\n print(\"***Logged in as 'annonymous'\")\r\n \r\n try:\r\n f.cwd(dirn)\r\n except ftplib.error_perm:\r\n print(\"ERROR:cannot CD to %s\"%dirn)\r\n f.quit()\r\n return\r\n print(\"***Changed to %s folder\"%dirn)\r\n\r\n try:\r\n f.retrbinary('RETR %s'%file, open(file, 'wb').write)\r\n except ftplib.error_perm:\r\n print(\"ERROR:cannot read file %s\"%file)\r\n # os.unlink(file) # os.unlink()用于删除文件\r\n else:\r\n print(\"***Download %s to CWD\"%file)\r\n f.quit()\r\n return\r\n\r\nif __name__ == '__main__':\r\n main()\r\n \r\n\r\n"
},
{
"alpha_fraction": 0.6269896030426025,
"alphanum_fraction": 0.6505190134048462,
"avg_line_length": 24.796297073364258,
"blob_id": "1f604ed11bdc6ed2f02a22bbff2e4eab220a4853",
"content_id": "a9b7dceaedb5eb0d5f480529bdcd5ba69f03af61",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 1621,
"license_type": "no_license",
"max_line_length": 72,
"num_lines": 54,
"path": "/socket/Email/POP3Client.py",
"repo_name": "pangxie1987/SocketServer",
"src_encoding": "UTF-8",
"text": "# -*- coding:utf-8 -*-\r\n'''\r\npython SMTP发送邮件,POP3接收邮件 \r\nSMTP协议用于发送邮件,POP3用于接收邮件\r\n'''\r\n\r\nfrom smtplib import SMTP as smtp\r\nfrom poplib import POP3_SSL as pop #用POP3_SSL\r\nfrom time import sleep\r\nfrom email.mime.text import MIMEText\r\n\r\n\r\n\r\nSMTPServer = 'smtp-mail.outlook.com'\r\nPOP3Server = 'pop.qq.com'\r\n\r\norigHdrs = ['From:[email protected]', 'To:[email protected]', 'Subject:python email test']\r\norigBody = ['xxx', 'yyy', 'zzz']\r\norigMsg = '\\r\\n\\r\\n'.join(['\\r\\n'.join(origHdrs),'\\r\\n'.join(origBody)])\r\nfrom_addr = '[email protected]'\r\npassword = 'Lpb201212'\r\nsmtp_server = 'smtp-mail.outlook.com'\r\nto_addr = '[email protected]'\r\nto_user = '773779347'\r\nto_License = 'siyweyavnatgbcbf' #qq邮箱,163邮箱要用授权码登录,授权码在邮箱设置中获取\r\n\r\ndef sendmail():\r\n '使用SMTP协议发送邮件'\r\n serdServer = smtp()\r\n serdServer.connect(SMTPServer, '587')\r\n serdServer.starttls() # 解决SMTP加密问题\r\n # serdServer.set_debuglevel(1) # 设置日志提示级别\r\n serdServer.login(from_addr, password)\r\n errs = serdServer.sendmail(from_addr, to_addr, origMsg)\r\n print('Mail Send Sucess!')\r\n serdServer.quit()\r\n\r\n\r\ndef recv_mail():\r\n '使用POP3_SSL接收邮件,解决邮件安全加密问题'\r\n print('recving mail...')\r\n recvServer = pop(POP3Server)\r\n recvServer.user(to_user)\r\n recvServer.pass_(to_License)\r\n rsp, msg, siz = recvServer.retr(recvServer.stat()[0])\r\n sep = msg.index('')\r\n recvBody = msg[sep+1:]\r\n print('Mail_Body:',recvBody)\r\n assert origBody[0] == recvBody[0]\r\n\r\n\r\nsendmail()\r\nsleep(10)\r\nrecv_mail()"
},
{
"alpha_fraction": 0.5654281377792358,
"alphanum_fraction": 0.5896607637405396,
"avg_line_length": 19.413793563842773,
"blob_id": "844226c823cbf42a13b891989c1a752244924773",
"content_id": "14d55fe97b2737eb7798126cea1be1ea7501680b",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 629,
"license_type": "no_license",
"max_line_length": 46,
"num_lines": 29,
"path": "/Server/WebService.py",
"repo_name": "pangxie1987/SocketServer",
"src_encoding": "UTF-8",
"text": "#-*- coding:utf-8 -*-\r\n'''\r\n模拟WEB服务器\r\n'''\r\n\r\n\r\nfrom socket import socket,AF_INET,SOCK_STREAM\r\n\r\ndef handle_request(client):\r\n buff=client.recv(1024)\r\n print(buff)\r\n client.sendall(b'HTTP/1.1 200 OK\\r\\n\\r\\n')\r\n client.sendall(b'Hello,World!')\r\n\r\n\r\ndef main():\r\n serv=socket(AF_INET,SOCK_STREAM)\r\n serv.bind(('localhost',8001))\r\n serv.listen(5)\r\n print('Waiting for connection...')\r\n \r\n while True:\r\n connection,address=serv.accept()\r\n print('Got connection form ',address)\r\n handle_request(connection)\r\n connection.close()\r\n\r\nif __name__=='__main__':\r\n main()"
},
{
"alpha_fraction": 0.5982142686843872,
"alphanum_fraction": 0.6071428656578064,
"avg_line_length": 10.666666984558105,
"blob_id": "40fc1c330777706aa4c57c2dbfce4f8c0b528c4f",
"content_id": "1a211128a7d6337b042d20cbc5b56a1e9fb4ed33",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 134,
"license_type": "no_license",
"max_line_length": 24,
"num_lines": 9,
"path": "/socket/FTP/nntpClient.py",
"repo_name": "pangxie1987/SocketServer",
"src_encoding": "UTF-8",
"text": "# -*- coding:utf-8 -*-\r\n'''\r\n网络新闻传输协议(NNTP)\r\nNNTP procol Test\r\n与FTP类似\r\n'''\r\n\r\nfrom nntplib import NNTP\r\nn = NNTP"
},
{
"alpha_fraction": 0.6298811435699463,
"alphanum_fraction": 0.6485568881034851,
"avg_line_length": 24.863636016845703,
"blob_id": "982b52be0e2a4c3606eda959a5fd91c1a4c1d873",
"content_id": "54eff94da3a6b10d282af64aa2d9e4fc1522127b",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 665,
"license_type": "no_license",
"max_line_length": 72,
"num_lines": 22,
"path": "/Server/UDPServer.py",
"repo_name": "pangxie1987/SocketServer",
"src_encoding": "UTF-8",
"text": "#-*- coding:utf-8 -*-\r\n'''\r\n创建UDPServer\r\n'''\r\nfrom socketserver import BaseRequestHandler,ThreadingUDPServer,UDPServer\r\nimport time\r\n\r\nclass TimeHandler(BaseRequestHandler):\r\n def handle(self):\r\n print('Got connection from ',self.client_address)\r\n msg,sock=self.request\r\n resp=time.ctime()\r\n sock.sendto(resp.encode('ascii'),self.client_address)\r\n\r\nif __name__=='__main__':\r\n \r\n # #单线程模式(一次只能响应一个连接请求)\r\n # serv=UDPServer(('',20000),TimeHandler)\r\n\r\n #多线程模式(响应多个客户端的连接)\r\n serv=ThreadingUDPServer(('',20000),TimeHandler)\r\n serv.serve_forever()"
},
{
"alpha_fraction": 0.541745126247406,
"alphanum_fraction": 0.5699937343597412,
"avg_line_length": 28.09433937072754,
"blob_id": "ff47c2a4bff660202319e6707491afb817f84e80",
"content_id": "bde59b98f93a1a10665745fa6bb231d1101478cd",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 1707,
"license_type": "no_license",
"max_line_length": 90,
"num_lines": 53,
"path": "/WinServer_Rm.py",
"repo_name": "pangxie1987/SocketServer",
"src_encoding": "UTF-8",
"text": "# -*- coding:utf-8 -*-\r\n# https://www.cnblogs.com/Tempted/p/7485629.html\r\n\r\n'''\r\nlinux windows 远程操作处理,测试用\r\n'''\r\n\r\nimport wmi,time,os\r\nimport paramiko,sys\r\n\r\ndef ssh_cmd(ip,port,cmd,user,passwd):\r\n '''\r\n Linux远程\r\n '''\r\n result = \"\"\r\n try:\r\n ssh = paramiko.SSHClient()\r\n ssh.set_missing_host_key_policy(paramiko.AutoAddPolicy())\r\n ssh.connect(ip,port,user,passwd)\r\n stdin, stdout, stderr = ssh.exec_command(cmd)\r\n result = stdout.read()\r\n print result\r\n ssh.close()\r\n except:\r\n print \"ssh_cmd err.\"\r\n return result\r\n\r\n# ssh_cmd('10.243.140.218','2000','start','its',\"Passw0rd@218\")\r\n\r\ndef sys_version(ipadress,user,pw):\r\n conn=wmi.WMI(computer=ipadress,user=user,password=pw)\r\n for sys in conn.Win32_OperatingSystem():\r\n print('Version:%s'%sys.Caption.encode('utf-8'),'Vernum:%s'%sys.BuildNumber) #系统信息\r\n # print('系统位数:%s'%sys.OSArchitecture) #系统的位数\r\n # print('系统进程:%s'%sys.NumberofProcesses) #系统的进程\r\n\r\n try:\r\n filename=['C:\\its\\深模拟撮合\\3_start.bat','C:\\its\\深模拟撮合\\Test.bat']\r\n # cmd_callbat=['cd C:\\its\\深模拟撮合','start bpdemo.prg /B']\r\n for file in filename:\r\n \r\n cmd_callbat=r'cmd /c call %s'%file\r\n # cmd_callbat=r'start '+file\r\n print(cmd_callbat)\r\n process_id,resback=conn.Win32_Process.Create(cmd_callbat) #执行bat\r\n time.sleep(1)\r\n print('%s 执行完成'%file)\r\n print(resback)\r\n\r\n except Exception,e:\r\n print(e)\r\n\r\n# sys_version (\"10.243.140.218\",\"ksadmin\",'Kayak2018!')"
},
{
"alpha_fraction": 0.4781144857406616,
"alphanum_fraction": 0.5067340135574341,
"avg_line_length": 18.64285659790039,
"blob_id": "ac28bf64f03cca2bc44439073732d1bcea3ef8a9",
"content_id": "02394bb1f844d6577ac1c3857ed2cf2d5e05f237",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 680,
"license_type": "no_license",
"max_line_length": 47,
"num_lines": 28,
"path": "/socket/TCP/socketClient.py",
"repo_name": "pangxie1987/SocketServer",
"src_encoding": "UTF-8",
"text": "# -*- coding:utf-8 -*-\r\n'''\r\n创建一个TCP客户端,程序会提示用户输入要传给服务器的信息,显示服务器返回的加了时间戳的结果\r\n'''\r\n\r\nfrom socket import socket, AF_INET, SOCK_STREAM\r\nimport time\r\n\r\ns = socket(AF_INET, SOCK_STREAM)\r\ns.connect(('127.0.0.1', 62555))\r\n\r\nwhile True:\r\n try:\r\n input = raw_input('>')\r\n if not input:\r\n break\r\n s.send(input)\r\n data = s.recv(1024)\r\n if not data:\r\n break\r\n print(data)\r\n\r\n time.sleep(1)\r\n except KeyboardInterrupt as e:\r\n # pass \r\n print('close connecting...')\r\n s.close()\r\n break\r\n \r\n\r\n "
},
{
"alpha_fraction": 0.5997229814529419,
"alphanum_fraction": 0.6149584650993347,
"avg_line_length": 19.878787994384766,
"blob_id": "e20c5adc6603d77ea05891f406c5c53094b3499a",
"content_id": "3b0568e1239f1eb7023e5269b061e0ec4dad4d9d",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 894,
"license_type": "no_license",
"max_line_length": 52,
"num_lines": 33,
"path": "/socket/TCP/socketServer.py",
"repo_name": "pangxie1987/SocketServer",
"src_encoding": "UTF-8",
"text": "# -*- coding:utf-8 -*-\r\n'''\r\n创建一个能接受客户的消息,在消息前加一个时间戳后返回的TCP服务器\r\nsocket:套接字\r\nAF_INET:socket网络编程\r\nSOCK_STREAM:TCP/IP\r\n'''\r\nfrom socket import socket, AF_INET, SOCK_STREAM\r\nfrom time import ctime\r\n\r\n\r\nhost = ''\r\nport = '62555'\r\naddr = (host, port) # host为空表示监听任意IP的连接请求\r\n\r\ns = socket(AF_INET, SOCK_STREAM)\r\ns.bind(addr)\r\ns.listen(5) # listen()参数表示最多运行几个连接同时连进来,后来的连接就会被拒绝掉\r\n\r\nwhile True:\r\n print('wating for connecting...')\r\n tcpCliSock, addr = s.accept()\r\n print('connected from ',addr)\r\n\r\n while True:\r\n data = tcpCliSock.recv(1024) #接收消息的长度\r\n print('data:',data)\r\n if not data:\r\n break\r\n tcpCliSock.send('[%s] %s'%(ctime(), data))\r\n\r\n tcpCliSock.close()\r\ns.close()\r\n"
},
{
"alpha_fraction": 0.5777778029441833,
"alphanum_fraction": 0.6333333253860474,
"avg_line_length": 16.200000762939453,
"blob_id": "395be11626a99ffb1de8cc7d0ec3e51d635a6d75",
"content_id": "f487f9e68cb9309f7c6edbc129ea4f2c27cc3e4c",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 180,
"license_type": "no_license",
"max_line_length": 44,
"num_lines": 10,
"path": "/Client/UDPClient.py",
"repo_name": "pangxie1987/SocketServer",
"src_encoding": "UTF-8",
"text": "# -*- coding:utf-8 -*-\r\n'''\r\nUDP Client\r\n'''\r\n\r\nfrom socket import socket,AF_INET,SOCK_DGRAM\r\ns=socket(AF_INET,SOCK_DGRAM)\r\ns.sendto(b'',('localhost',20000))\r\n\r\nprint(s.recv(1024))"
},
{
"alpha_fraction": 0.5629903078079224,
"alphanum_fraction": 0.5782187581062317,
"avg_line_length": 17.54054069519043,
"blob_id": "d75d77bb0954cd506fefd97fdc78fcabc4caa17c",
"content_id": "96522e1256570aad9014914d563a9909e3cb0d04",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 2375,
"license_type": "no_license",
"max_line_length": 74,
"num_lines": 111,
"path": "/Test.py",
"repo_name": "pangxie1987/SocketServer",
"src_encoding": "UTF-8",
"text": "# #-*- coding:utf-8 -*-\r\n# '''\r\n# Test file\r\n# '''\r\n\r\n# '''\r\n# ipaddress\r\n# '''\r\n# import ipaddress\r\n# ip='123.45.67.64/27'\r\n\r\n# net=ipaddress.ip_network(ip)\r\n# print(net)\r\n# for adress in net:\r\n# pass\r\n# # print(adress)\r\n\r\n# inet=ipaddress.ip_interface(ip)\r\n# print(inet)\r\n# print(inet.ip)\r\n\r\n\r\n# from socketserver import TCPServer,ThreadingTCPServer,BaseRequestHandler\r\n# from socket import socket,AF_INET,SOCK_STREAM\r\n\r\n# '''\r\n# 直接用socket创建Server\r\n# '''\r\n# serv=socket(AF_INET,SOCK_STREAM)\r\n# serv.bind(('127.0.0.1',8000))\r\n# serv.listen(5)\r\n# while True:\r\n# conn,addr=serv.accept()\r\n# print('Got connection from ',addr)\r\n# while True:\r\n# data=conn.recv(1024)\r\n# print(data,len(data))\r\n# conn.sendall(b'i am Server')\r\n# if len(data)==0:\r\n# print('close connection')\r\n# conn.close()\r\n# break\r\n# serv.close()\r\n\r\n'''\r\n多线程,Event的使用\r\n'''\r\n# from threading import Thread,Event\r\n# import time\r\n\r\n# def countall(n,start_evt):\r\n# print('countall starting')\r\n# start_evt.set()\r\n# while n>0:\r\n# #print(n)\r\n# print('T-minus', n)\r\n# n-=1\r\n# time.sleep(2)\r\n\r\n# start_evt=Event()\r\n# print('Launching countall')\r\n# t=Thread(target=countall,args=(10,start_evt))\r\n# t.start()\r\n# # t.join()\r\n# start_evt.wait()\r\n# print('countall is running')\r\n\r\n\r\n# '''\r\n# 获取输入或者读取文件模块fileinput\r\n# '''\r\n# import fileinput\r\n# with fileinput.input('.\\TestClient.py') as f:\r\n# for line in f:\r\n# print(f.filelineno(),line,end='\\n')\r\n\r\n# '''\r\n# 提示密码输入(与平台无关)\r\n# '''\r\n# import getpass\r\n# # 获取当前的用户名\r\n# user=getpass.getuser()\r\n# print(user)\r\n# # 提示用户输入密码,并不显示当前的输入(pycharm无法使用)\r\n# pswd=getpass.getpass(prompt='Password:',stream=None)\r\n# print(pswd)\r\n\r\n# '''\r\n# 获取终端的大小,pychram无法使用\r\n# '''\r\n# import os\r\n# size=os.get_terminal_size()\r\n# print(size)\r\n\r\n# '''\r\n# 执行命令\r\n# '''\r\n# import subprocess\r\n# text_bytes=subprocess.check_output(['netatat','-a'])\r\n# print(text_bytes)\r\n\r\n'''\r\nwebbrowser打开游览器\r\n'''\r\n# import webbrowser\r\n# # 使用默认的浏览器打开\r\n# webbrowser.open('http://www.baidu.com')\r\n\r\n# # 获取Chrome浏览器\r\n# wb=webbrowser.get('chrome')\r\n# wb.open('http://www.baidu.com')"
}
] | 17 |
rraid/navi-v2
|
https://github.com/rraid/navi-v2
|
241b3bdb1c5bc96dd90c131cc3747593f31c60ed
|
9b4bff86791cf81fc00f6c9bd16cb257f9949afb
|
9a4fc8275473e5349ad53344cb7abdf520f0bf81
|
refs/heads/master
| 2020-06-25T03:22:19.032506 | 2018-02-16T20:45:05 | 2018-02-16T20:45:05 | 96,954,800 | 0 | 0 | null | null | null | null | null |
[
{
"alpha_fraction": 0.6162601709365845,
"alphanum_fraction": 0.6439024209976196,
"avg_line_length": 17.636363983154297,
"blob_id": "f666c412f881f1b91de29a8aaaf5ffa7da7e271d",
"content_id": "8e72f4a8e0d1756e8d2b06570f24898bc29444bf",
"detected_licenses": [
"MIT"
],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 615,
"license_type": "permissive",
"max_line_length": 61,
"num_lines": 33,
"path": "/examples/testZed.py",
"repo_name": "rraid/navi-v2",
"src_encoding": "UTF-8",
"text": "#!/usr/bin/env python2\nimport sys\nsys.path.append(\"../device/\")\nimport devhub\nimport numpy as np\nimport cv2\nimport time\nimport signal\n\nstopTest = False\n\ndef stopsigHandler(signo, frame):\n stopTest = True\n devhub.stop()\n sys.exit(0)\n\n\nif __name__ == \"__main__\":\n signal.signal(signal.SIGINT, stopsigHandler)\n print \"Press Ctrl+C to stop\"\n devhub.init()\n\n while not stopTest:\n\n frame = devhub.getZedFrame()\n if type(frame) == type(None):\n time.sleep(0.01)\n continue\n \n cv2.imshow(\"zed depth\", frame) \n \n #cv2.imshow(\"zed depth\", np.clip(frame / 10.0, 0.0, 1.0))\n cv2.waitKey(1)\n"
},
{
"alpha_fraction": 0.5495867729187012,
"alphanum_fraction": 0.6121605634689331,
"avg_line_length": 19.658536911010742,
"blob_id": "3a01e18cb28372dc8b5838c5f99b47bf6c5f64aa",
"content_id": "733858d2079a0f12c91879cb18a0557b4bcdd8b5",
"detected_licenses": [
"MIT"
],
"is_generated": false,
"is_vendor": false,
"language": "C++",
"length_bytes": 1694,
"license_type": "permissive",
"max_line_length": 48,
"num_lines": 82,
"path": "/device/arduino/navi_v2-Mega/navi_v2-Mega.ino",
"repo_name": "rraid/navi-v2",
"src_encoding": "UTF-8",
"text": "//////////////////////PINS//////////////////////\n/*\n GPS: VIN: 3.3V RX: 11 TX: 12\n Sonar Value: Top from l to r: 24,26,28,30\n botton from l to r: 22,23,25,27,29\n*/\n///////////////////////////////////////////////\n#include <Wire.h>\n#include <TinyGPS++.h>\n#include <SoftwareSerial.h>\n#include \"compass.h\"\n\n//Compass\nint heading = 0;\n\n//GPS\nstatic const int RXPin = 11, TXPin = 12;\nstatic const uint32_t GPSBaud = 57600;\nTinyGPSPlus gps;\nSoftwareSerial ss(RXPin, TXPin);\ndouble latitude = 40.521788;\ndouble longitude = -74.4608355;\n\n#define BUFSIZE 256\nconst int safesize = BUFSIZE / 2;\nchar write_buffer[BUFSIZE];\n\nint delayGPS;\n\nvoid setup() {\n Serial.begin(57600);\n ss.begin(GPSBaud);\n Wire.begin();\n compass_x_offset = -112.66;\n compass_y_offset = 992.60;\n compass_z_offset = 546.70;\n compass_x_gainError = 1.01;\n compass_y_gainError = 1.09;\n compass_z_gainError = 0.99;\n\n compass_init(2);\n}\n\nvoid loop()\n{\n getGPS();\n //getsonar_value();\n //getCompass();\n writeSerial();\n}\n\nvoid getGPS() {\n while (ss.available()){\n gps.encode(ss.read());\n }\n if (gps.location.isValid()) {\n latitude = gps.location.lat();\n longitude = gps.location.lng();\n }\n}\n\nvoid getCompass(){\n compass_heading();\n heading = (int)(bearing + 260) % 360;\n}\n\nchar ftos [safesize];\nvoid writeSerial()\n{\n memset(write_buffer, '\\0', BUFSIZE);\n strcat(write_buffer, \"[\");\n dtostrf(latitude, 20, 10, ftos);\n strcat(write_buffer, ftos);\n strcat(write_buffer, \",\");\n dtostrf(longitude, 20, 10, ftos);\n strcat(write_buffer, ftos);\n strcat(write_buffer, \",\");\n dtostrf(heading, 20, 10, ftos);\n strcat(write_buffer, ftos);\n strcat(write_buffer, \"]\\n\");\n Serial.write(write_buffer);\n}\n"
},
{
"alpha_fraction": 0.6724832057952881,
"alphanum_fraction": 0.6808724999427795,
"avg_line_length": 32.11111068725586,
"blob_id": "55d20b34575a708ad897299b024a366049c60eac",
"content_id": "34a80151594785eedc51ee8f75d65eb65d9318d9",
"detected_licenses": [
"MIT"
],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 2980,
"license_type": "permissive",
"max_line_length": 86,
"num_lines": 90,
"path": "/perception/perceptbox.py",
"repo_name": "rraid/navi-v2",
"src_encoding": "UTF-8",
"text": "import sys, math\nsys.path.append(\"../device/\")\nimport perception\nimport devhub\nfrom threading import Thread\nimport time\nimport numpy as np\n\nclass PerceptBox(Thread):\n \"\"\"\n Acts as the front end for the perception unit. This unit contains two buffers:\n 1) Global Buffer: a buffer which contains (GPS, Compass) readings\n 2) Local Buffer: a buffer which contains more accurate stereo readings\n\n Note that this is the first version of the box which uses a mean-shifted\n correlation technique to connect the local and global frames.\n \"\"\"\n def __init__(self, gpsCallback=None, compassCallback=None, stereoPoseCallback=None):\n \"\"\" Constructor\n Arguments:\n - gpsCallback: a function which returns the gps reading (x, y)\n - compassCallback: a function which returns the compass reading theta\n - stereoPoseCallback: a function which returns (x, y, theta)\n \"\"\"\n assert(type(gpsCallback) != type(None))\n assert(type(compassCallback) != type(None))\n assert(type(stereoPoseCallback) != type(None))\n\n Thread.__init__(self)\n\n # start by defining a buffer to store the GPS and the robot\n self.bufmax = 2000 # max 2000 readings\n self.globalBuffer = np.zeros((self.bufmax, 3))\n self.localBuffer = np.zeros((self.bufmax, 3))\n\n # create a counter for current frame index\n self.frameid = 0\n self.gpsCallback = gpsCallback\n self.compassCallback = compassCallback\n self.stereoPoseCallback = stereoPoseCallback\n self.globalPose = None\n self.localPose = None\n\n def update(self):\n \"\"\" Update the current pose based on the buffers of stored data\n \"\"\"\n # combine GPS and compass into [x, y, theta]\n self.globalPose = np.concatenate(\n [self.gpsCallback(), self.compassCallback()], axis=0)\n self.localPose = self.stereoPoseCallback()\n\n self.globalBuffer[self.frameid, :] = self.globalPose\n self.localBuffer[self.frameid, :] = self.localPose\n\n cv2.imwrite(\"image/\" + str(self.frameid) + \".png\", devhub.depthImage)\n \n self.frameid = (self.frameid + 1) % self.bufmax\n\n def run(self):\n while True:\n self.update()\n\n def getPose(self):\n \"\"\" \n Returns:\n - pose (x, y, theta)\n \"\"\"\n\n # calculate mean theta offset\n thetaOffset = np.mean(self.globalBuffer[:,2:] - self.localBuffer[:,2:])\n\n # Use the angle to rotate the local position and obtain the offset\n s = math.sin(math.radians(thetaOffset))\n c = math.cos(math.radians(thetaOffset))\n newpos = np.dot(self.localBuffer[:,:2], np.array([[c, -s], [s, c]]))\n posOffset = [np.mean(self.globalBuffer[:,0] - newpos[:,0]),\n np.mean(self.globalBuffer[:,1] - newpos[:,1])]\n\n return np.concatenate((newpos[self.frameid] + posOffset,\n [self.localBuffer[0][2] + thetaOffset]))\n\n ## Callbacks ##\n def assignGPSCallback(self, fn):\n self.gpsCallback = fn\n\n def assignCompassCallback(self, fn):\n self.compassCallback = fn\n\n def assignStereoPoseCallback(self, fn):\n self.stereoPoseCallback = fn\n"
},
{
"alpha_fraction": 0.6278446912765503,
"alphanum_fraction": 0.6365461945533752,
"avg_line_length": 21.17910385131836,
"blob_id": "1f7d385ea955970c979f7e2a60a5f0314d5e1e3e",
"content_id": "93a131984f87016dfc0c952134b4bb63e5ef5026",
"detected_licenses": [
"MIT"
],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 1494,
"license_type": "permissive",
"max_line_length": 130,
"num_lines": 67,
"path": "/examples/testPerceptbox.py",
"repo_name": "rraid/navi-v2",
"src_encoding": "UTF-8",
"text": "#!/usr/bin/env python2\nimport sys\nsys.path.append(\"../perception/\")\nimport numpy as np\nsys.path.append(\"../device/\")\nimport devhub\nimport cv2\nimport pickle\nimport signal\nimport time\n\nmat = None\nstopTest = False\n\nbuff = []\n\ndef stopsigHandler(signal, frame):\n global buff\n \n # save everything\n with open(\"/media/nvidia/naviData/data\", \"wb\") as fp:\n pickle.dump(buff, fp)\n stopTest = True\n devhub.stop()\n sys.exit(0)\n\nif __name__ == \"__main__\":\n\n global buff\n global mat\n global matColor\n global stopTest\n \n \n signal.signal(signal.SIGINT, stopsigHandler)\n print(\"Press Ctrl+C to stop\")\n devhub.init()\n time.sleep(1)\n\n # now that the box has been anchored, we can attempt to test out the localizer\n while not stopTest:\n\n mat = devhub.getZedReadings()\n if type(mat) == type(None):\n continue\n if type(mat) != np.ndarray:\n time.sleep(0.01)\n continue\n #matColor = devhub.getZedFrame()\n #if type(matColor) == type(None):\n # continue\n #if type(matColor) != np.ndarray:\n # time.sleep(0.01)\n # continue\n cv2.imshow(\"zed depth\", mat)\n cv2.waitKey(1)\n \n idx = len(buff)\n buff.append(\"zedFrame\" + str(idx) + \" GPS: \" + str(devhub.getGPSReadings()) + \" Compass: \" + str(devhub.getCompassReadings()))\n #buff.append(\"zedFrame\" + str(idx))\n np.save(\"/media/nvidia/naviData/testRun/\" + str(idx),mat)\n np.save(\"/media/nvidia/naviData/testRun/Color\" + str(idx),matColor)\n \n \n #gps\n #compass\n #zedPose\n \n\n\n\n"
},
{
"alpha_fraction": 0.6710526347160339,
"alphanum_fraction": 0.719298243522644,
"avg_line_length": 25.30769157409668,
"blob_id": "6c80051b5440ab063897dfce06b448f47ad1fe36",
"content_id": "02a1153fe212217e2a9220d17a6d8a5040d445dd",
"detected_licenses": [
"MIT"
],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 684,
"license_type": "permissive",
"max_line_length": 87,
"num_lines": 26,
"path": "/examples/testCompass.py",
"repo_name": "rraid/navi-v2",
"src_encoding": "UTF-8",
"text": "import sys\nsys.path.append(\"../perception\")\nsys.path.append(\"../device/\")\nimport devhub\nimport perception\nimport numpy as np\nimport time\nimport matplotlib.pyplot as plt\nimport cv2\nfrom matplotlib import cm\n\ndef displayDistribution(name, grid):\n plt.imshow(np.flipud(grid) * 255.0, cmap=cm.gray)\n plt.show()\n\ndevhub.init()\ntime.sleep(1)\n\nwhile True:\n values = devhub.getCompassReadings()\n distribution = perception.getCompassDistribution(values)\n distribution = np.repeat(distribution, 10, axis=0)\n plot = np.repeat(np.reshape(np.arange(0.0, 1.0, 0.01) + 0.01, (100, 1)), 360, axis=1)\n plot = (plot <= distribution) * 1.0\n cv2.imshow(\"plot\", np.flipud(plot))\n cv2.waitKey(10)\n"
},
{
"alpha_fraction": 0.6215807795524597,
"alphanum_fraction": 0.6259439587593079,
"avg_line_length": 32.235633850097656,
"blob_id": "03df50c974b69c8c623491b58e9752418cd6cb2c",
"content_id": "2d200725edf5262e3c4367d126d3d92626f9349b",
"detected_licenses": [
"MIT"
],
"is_generated": false,
"is_vendor": false,
"language": "C++",
"length_bytes": 5959,
"license_type": "permissive",
"max_line_length": 119,
"num_lines": 174,
"path": "/device/src/zed.cpp",
"repo_name": "rraid/navi-v2",
"src_encoding": "UTF-8",
"text": "///////////////////////////////////////////////////////////////////////////\r\n//\r\n// Copyright (c) 2017, STEREOLABS.\r\n//\r\n// All rights reserved.\r\n//\r\n// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS\r\n// \"AS IS\" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT\r\n// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR\r\n// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT\r\n// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,\r\n// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT\r\n// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,\r\n// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY\r\n// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT\r\n// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE\r\n// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.\r\n//\r\n///////////////////////////////////////////////////////////////////////////\r\n\r\n\r\n/***************************************************************************************************\r\n ** This sample demonstrates how to grab images and depth map with the ZED SDK **\r\n ** and apply the result in a 3D view \"point cloud style\" with OpenGL /freeGLUT **\r\n ** Some of the functions of the ZED SDK are linked with a key press event\t\t **\r\n ***************************************************************************************************/\r\n\r\n// Standard includes\r\n#include <stdio.h>\r\n#include <string.h>\r\n#include <math.h>\r\n#include \"libzed.h\"\r\n\r\n// ZED includes\r\n#include <sl/Camera.hpp>\r\n\r\n//// Using std and sl namespaces\r\nusing namespace std;\r\nusing namespace sl;\r\n\r\n//// Create ZED object (camera, callback, images)\r\nsl::Camera zed;\r\nsl::Mat depth_image;\r\n\r\nsl::Pose pose;\r\nstd::vector<size_t> cl;\r\n\r\nsl::TRACKING_STATE tracking_state;\r\n\r\nsl::Transform camera_projection;\r\n\r\nstd::thread zed_callback;\r\n\r\n// Spatial Mapping status\r\nbool mapping_is_started = false;\r\nstd::chrono::high_resolution_clock::time_point t_last;\r\n\r\nbool quit = false;\r\nbool zedGrab = false;\r\n\r\nbool zed_open() {\r\n\r\n // Setup configuration parameters for the ZED\r\n InitParameters initParameters;\r\n initParameters.camera_resolution = RESOLUTION_HD720;\r\n initParameters.depth_mode = DEPTH_MODE_PERFORMANCE; //need quite a powerful graphic card in QUALITY\r\n initParameters.coordinate_system = COORDINATE_SYSTEM_RIGHT_HANDED_Z_UP; // OpenGL's coordinate system is right_handed\r\n initParameters.coordinate_units = UNIT_METER; // set meter as the OpenGL world will be in meters\r\n //printf(\"Initalized Basic\");\r\n \r\n // Open the ZED\r\n ERROR_CODE err = zed.open(initParameters);\r\n if (err != SUCCESS) {\r\n printf(\"[ERROR] Cannot open zed!\\n\");\r\n zed.close();\r\n return false; // Quit if an error occurred\r\n }\r\n //printf(\"Zed opened\");\r\n \r\n // Set positional tracking parameters\r\n TrackingParameters trackingParameters;\r\n trackingParameters.initial_world_transform = sl::Transform::identity();\r\n trackingParameters.enable_spatial_memory = true; // Enable Spatial memory\r\n //printf(\"Initalized tracking\");\r\n \r\n zed.enableTracking(trackingParameters);\r\n //printf(\"Enabled Tracking\");\r\n \r\n zed_callback = std::thread(zed_run);\r\n //printf(\"Started Thread\");\r\n \r\n return true;\r\n}\r\n\r\nvoid zed_run(){\r\n\r\n // Get the distance between the center of the camera and the left eye\r\n float translation_left_to_center = zed.getCameraInformation().calibration_parameters.T.x * 0.5f;\r\n while (!quit){\r\n zedGrab = zed.grab();\r\n if (zedGrab == SUCCESS) {\r\n // Get the position of the camera in a fixed reference frame (the World Frame)\r\n TRACKING_STATE tracking_state = zed.getPosition(pose, sl::REFERENCE_FRAME_WORLD);\r\n if (tracking_state == TRACKING_STATE_OK) {\r\n // Get the pose at the center of the camera (baseline/2 on X axis)\r\n //transformPose(pose.pose_data, translation_left_to_center); \r\n sl::Transform transform_;\r\n transform_.setIdentity();\r\n // Move the tracking frame by tx along the X axis\r\n transform_.tx = translation_left_to_center;\r\n // Apply the transformation\r\n pose.pose_data = Transform::inverse(transform_) * pose.pose_data * transform_;\r\n }\r\n }\r\n else sl::sleep_ms(1);\r\n }\r\n}\r\n\r\n\r\n\r\n\r\n/**\r\n * This function frees and close the ZED, its callback(thread) and the viewer\r\n **/\r\nvoid zed_close() {\r\n printf(\"Quitting C++\\n\");\r\n quit = true;\r\n zed_callback.join();\r\n\r\n zed.close();\r\n}\r\n\r\nbool getPose(void* poseGet){\r\n if (zedGrab != SUCCESS){\r\n sl::sleep_ms(1);\r\n return false;\r\n }\r\n // Get quaternion, rotation and translation\r\n // Only use Euler angles to display absolute angle values. Use quaternions for transforms.\r\n sl::float3 rotation = pose.getEulerAngles();\r\n sl::float3 translation = pose.getTranslation();\r\n ((float*)poseGet)[0] = translation.x;\r\n ((float*)poseGet)[1] = translation.y;\r\n ((float*)poseGet)[2] = translation.z;\r\n ((float*)poseGet)[3] = rotation.x;\r\n ((float*)poseGet)[4] = rotation.y;\r\n ((float*)poseGet)[5] = rotation.z;\r\n return true;\r\n}\r\n\r\nbool grabDepthFrame(void* dst) {\r\n if (zedGrab != SUCCESS){\r\n sl::sleep_ms(1);\r\n return false;\r\n }\r\n zed.retrieveMeasure(depth_image, sl::MEASURE_DEPTH);\r\n memcpy(dst, (void *)depth_image.getPtr<sl::float1>(sl::MEM_CPU),\r\n 4 * depth_image.getHeight() * depth_image.getWidth());\r\n return true;\r\n}\r\n\r\nbool grabFrame(void* dst){\r\nsl::Mat zed_image;\r\nif (zed.grab() == SUCCESS) {\r\n // Retrieve the left image in sl::Mat\r\n // The cv::Mat is automatically updated\r\n zed.retrieveImage(zed_image, VIEW_LEFT);\r\n // Display the left image from the cv::Mat object\r\n memcpy(dst, (void *)zed_image.getPtr<sl::float1>(sl::MEM_CPU),\r\n 4 * depth_image.getHeight() * depth_image.getWidth());\r\n return true;\r\n }\r\n return false;\r\n}\r\n\r\n"
},
{
"alpha_fraction": 0.5452229380607605,
"alphanum_fraction": 0.5740976929664612,
"avg_line_length": 19.831857681274414,
"blob_id": "13046aab4f529863ac61275db432c33cd162dc55",
"content_id": "02a07a310ff988733481020a6d296b50072c6f92",
"detected_licenses": [
"MIT"
],
"is_generated": false,
"is_vendor": false,
"language": "C++",
"length_bytes": 2355,
"license_type": "permissive",
"max_line_length": 75,
"num_lines": 113,
"path": "/device/arduino/navi_v2-Uno/navi_v2-Uno.ino",
"repo_name": "rraid/navi-v2",
"src_encoding": "UTF-8",
"text": "//////////////////////PINS//////////////////////\n/*\n Magnometer: VIN: 3.3V SDA: 20 SCL: 21\n Motor - R: 9 L: 10\n*/\n///////////////////////////////////////////////\n#include <FRCmotor.h>\n\nint gamemode = 1; // Enables the FRCmotor library\n\nint leftVel = 0;\nint rightVel = 0;\nint lasttime;\n\nint softstop;\n\n//SerialRead/Write\n#define BUFSIZE 256\n#define SPEED_LIMIT 100\n\n\nconst int safesize = BUFSIZE / 2;\nchar buf[BUFSIZE];\nchar write_buffer[BUFSIZE];\nint available_bytes = 0;\n\n// Target and previous velocity arrays\nint target_vel[] = {0 , 0};\n\n\n//Motor Control\nFRCmotor leftMotor; //DECLARE LEFT MOTOR CONTROLLER\nFRCmotor rightMotor; //DECLARE RIGHT MOTOR CONTROLLER\n\n\nvoid setup() {\n leftMotor.SetPort(11); //DECLARE ARDUINO PORT FOR MOTOR CONTROLLER SIGNAL\n rightMotor.SetPort(10);\n leftMotor.Set(0); //SET INITIAL MOTOR VALUES TO ZERO\n rightMotor.Set(0); //(100 MAX FORWARD, -100 MAX BACK)\n \n Serial.begin(57600);\n lasttime = millis();\n softstop = millis();\n}\n\nvoid loop() \n{\n readSerial();\n ramp();\n moveMotor();\n delay(50);\n}\n\nvoid readSerial()\n{\n if ((available_bytes = Serial.available()))\n {\n // Read + attach null byte to read string\n int obytes = strlen(buf);\n Serial.readBytes(&buf[obytes], available_bytes);\n buf[available_bytes + obytes] = '\\0';\n if(strlen(buf) > safesize){\n memmove(buf,&buf[strlen(buf) - safesize],safesize);\n buf[safesize] = '\\0';\n }\n char *s, *e;\n if ((e = strchr(buf, '\\n')))\n {\n e[0] = '\\0';\n if ((s = strrchr(buf, '[')))\n {\n sscanf(s, \"[%d,%d]\\n\", &target_vel[0], &target_vel[1]);\n target_vel[0] = constrain(target_vel[0],-SPEED_LIMIT,SPEED_LIMIT);\n target_vel[1] = constrain(target_vel[1],-SPEED_LIMIT,SPEED_LIMIT);\n\n }\n memmove(buf, &e[1], strlen(&e[1]) + sizeof(char));\n }\n //softstop = millis();\n }\n //if(millis() - softstop > 5000){\n //Serial.print(\"SOFTSTOP\");\n //target_vel[0] = 0;\n //target_vel[1] = 0;\n //}\n}\n\n\nvoid ramp(){\n if(millis() - lasttime > 50){\n if(leftVel< target_vel[0]){\n leftVel++;\n }\n if(leftVel> target_vel[0]){\n leftVel--;\n }\n if(rightVel< target_vel[1]){\n rightVel++;\n }\n if(rightVel> target_vel[1]){\n rightVel--;\n }\n lasttime = millis();\n }\n}\n\nvoid moveMotor()\n{\n leftMotor.Set(-1*leftVel);\n rightMotor.Set(rightVel);\n\n}\n\n"
},
{
"alpha_fraction": 0.6334661245346069,
"alphanum_fraction": 0.6533864736557007,
"avg_line_length": 31.7391300201416,
"blob_id": "c500ea2dd9858a769d7807cbd4bc9d4f55049e61",
"content_id": "748062da3773cafa9c671254d580e4a36bace079",
"detected_licenses": [
"MIT"
],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 753,
"license_type": "permissive",
"max_line_length": 92,
"num_lines": 23,
"path": "/examples/testGPS.py",
"repo_name": "rraid/navi-v2",
"src_encoding": "UTF-8",
"text": "import sys\nsys.path.append(\"../perception/\")\nsys.path.append(\"../device/\")\nimport perception\nimport devhub\nimport numpy as np\nimport cv2\nif __name__ == \"__main__\":\n devhub.init()\n grid = np.flipud(cv2.imread(\"../perception/pathmap_scaled.png\", cv2.IMREAD_GRAYSCALE) / 2)\n #grid = np.array([np.copy(grid), np.zeros(grid.shape), np.zeros(grid.shape)]) \n #grid = np.rollaxis(grid, -1)\n #grid = np.rollaxis(grid, -1)\n while True:\n position = devhub.getGPSReadings()\n #print position\n gps = perception.getGPSDistribution(position)\n #gps = np.rollaxis(np.array([gps, gps, gps]), -1)\n #gps = np.rollaxis(gps, -1)\n #gps = gps + np.flipud(grid)\n #print gps.shape\n cv2.imshow(\"dsaf;lk\",np.flipud(gps + grid)*255)\n cv2.waitKey(10)\n"
},
{
"alpha_fraction": 0.5092592835426331,
"alphanum_fraction": 0.59375,
"avg_line_length": 36.565216064453125,
"blob_id": "7ad16effbf0573035e7fd9c22a3868ca8d557766",
"content_id": "26edbd67ff92fe9f06233c6b902866fabc4da3d7",
"detected_licenses": [
"MIT"
],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 864,
"license_type": "permissive",
"max_line_length": 77,
"num_lines": 23,
"path": "/agent/display.py",
"repo_name": "rraid/navi-v2",
"src_encoding": "UTF-8",
"text": "import numpy as np\nfrom skimage.draw import circle, line\n\ndef displayVelocities(left, right, shape=(200, 200)):\n # NOTE: For display purposes only! To use as a distribution, call np.flipud\n # on the output\n r = shape[0]\n c = shape[1]\n # draw a circle representing the robot shape\n rr1, cc1 = circle(r / 2, c / 2, r / 10, shape)\n # draw lines representing the robot left wheel\n rr2, cc2 = line(r / 2, c / 4, r / 2 + int(left * 100), c / 4)\n rr3, cc3 = line(r / 2, c * 3 / 4, r / 2 + int(right * 100), c * 3 / 4)\n diff = (right - left) * 100\n avg = (right + left / 2) * 100\n rr4, cc4 = line(r / 2, c / 2, r / 2 + int(avg), c / 2 + int(diff))\n # push the drawings onto a color image\n img = np.zeros((r, c, 3), dtype=np.uint8)\n img[rr1, cc1, :2] = 255\n img[rr2, cc2, 1:] = 255\n img[rr3, cc3, 1:] = 255\n img[rr4, cc3, 1] = 255\n return np.flipud(img)\n"
},
{
"alpha_fraction": 0.7035398483276367,
"alphanum_fraction": 0.7035398483276367,
"avg_line_length": 14,
"blob_id": "a16aac2b87316f406ebdd95cdd51c0b60f70932f",
"content_id": "7cf29a81e6288667fcb5149c4751b12305d2e778",
"detected_licenses": [
"MIT"
],
"is_generated": false,
"is_vendor": false,
"language": "C++",
"length_bytes": 226,
"license_type": "permissive",
"max_line_length": 27,
"num_lines": 15,
"path": "/device/src/libzed.h",
"repo_name": "rraid/navi-v2",
"src_encoding": "UTF-8",
"text": "#ifndef libzed_h\n#define libzed_h\n\n#include <sl/Camera.hpp>\nusing namespace sl;\n\nextern \"C\"{\nbool zed_open();\nvoid zed_close();\nvoid zed_run();\nbool grabDepthFrame(void*);\nbool getPose(void*);\nbool grabFrame(void*);\n}\n#endif\n\n"
},
{
"alpha_fraction": 0.6177276968955994,
"alphanum_fraction": 0.6500152349472046,
"avg_line_length": 19.778480529785156,
"blob_id": "37aa5f81d7edfd4d701b196b4252e61be39824f2",
"content_id": "012f094ee6183bc263f91d1cde69aebe73688374",
"detected_licenses": [
"MIT"
],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 3283,
"license_type": "permissive",
"max_line_length": 73,
"num_lines": 158,
"path": "/device/devhub.py",
"repo_name": "rraid/navi-v2",
"src_encoding": "UTF-8",
"text": "import numpy as np\nimport serial\nfrom threading import Thread, Event, Lock\nimport zed\nimport time\nimport struct\nimport cv2\nimport sys\nimport math\n\n\n# Globals\ndepthColumns = []\ncolorImage = []\ndepthImage = []\nimage = []\nlatitude = None\nlongitude = None\nheading = None\n\nrot = np.array([0,0,0])\ntsln = np.array([0,0,0])\n\n\n## Left, Right\nmotorVelocity = [0,0]\n\n## Y, X\nmapShape = [290,315]\n## Y, X\nmapBL = [40.52119,-74.462088]\nmapTR = [40.523815,-74.458337]\n\narduinoMega = None\narduinoUno = None\nrosReader = None\n\ndef getGPSReadings():\n values = (latitude, longitude)\n if type(values) == type(None) or values[0] == None:\n return None\n x = ((longitude - mapBL[1]) / (mapTR[1] - mapBL[1])) * mapShape[1]\n y = ((latitude - mapBL[0]) / (mapTR[0] - mapBL[0])) * mapShape[0] \n return x,y\n\ndef getZedReadings():\n global depthColumns\n global depthImage\n depthImage = zed.grabDepthFrame()\n if type(depthImage) == type(None):\n return None\n dataMid = depthImage.shape[0]/2\n subImage = depthImage[dataMid-50:dataMid+50,:]\n depthColumns = np.amin(subImage, axis=0)\n return depthImage\n\ndef getZedFrame():\n global image\n image = zed.grabFrame()\n return image\n\ndef getZedPose():\n return zed.getPose()\n\n\ndef getCompassReadings():\n global heading\n if heading == None:\n return None\n return (heading + 24) % 360\n \ndef setMotorVelocity(left, right):\n global motorVelocity\n left = np.clip(left,-1.0,1.0)\n right = np.clip(right,-1.0,1.0)\n motorVelocity = [left,right]\n \ndef getMotorVelocity():\n return motorVelocity\n\nclass ArduinoListener(Thread):\n\n def __init__(self, idName):\n print \"Ard. listener Started\"\n Thread.__init__(self)\n self.arduino = serial.Serial(\"/dev/\" + idName ,57600)\n self.stopstate = False\n\n def run(self):\n while not self.stopstate:\n self.readSerial()\n #time.sleep(0.1) # 10 MHz refresh\n\n def readSerial(self):\n global latitude\n global longitude\n global heading\n\n if self.arduino.in_waiting > 0:\n buff = None;\n buff = self.arduino.readline()\n prev = 1\n count = 0\n devType = None\n if buff[0] == '[':\n try:\n buff = eval(buff.strip())\n (latitude,longitude,heading) = buff\n except:\n print \"Failed Serial Read\"\n\n def stop(self):\n self.stopstate = True\n\nclass ArduinoPublisher(Thread):\n \n def __init__(self, idName):\n print \"Ard. publisher Started\"\n Thread.__init__(self)\n self.arduino = serial.Serial(\"/dev/\" + idName ,57600)\n self.stopstate = False\n\n def run(self):\n while not self.stopstate:\n self.writeSerial()\n time.sleep(0.1) # 10 MHz refresh\n\n def writeSerial(self):\n global motorVelocity\n left = motorVelocity[0]\n right = motorVelocity[1]\n #print left,right\n writeBuff = \"[\"+ str(int(left*40)) + \",\" + str(int(right*40)) + \"]\\n\"\n self.arduino.write(writeBuff)\n\n def stop(self):\n self.stopstate = True\n\ndef init():\n global arduinoMega\n global arduinoUno\n zed.open()\n arduinoMega = ArduinoListener(\"ttyACM0\")\n arduinoMega.start()\n #arduinoUno = ArduinoPublisher(\"ttyACM0\")\n #arduinoUno.start()\n\n\ndef stop():\n global arduinoMega\n global arduinoUno\n arduinoMega.stop()\n arduinoMega.join()\n #arduinoUno.stop()\n #arduinoUno.join()\n zed.close()\n time.sleep(1)\n sys.exit()\n"
},
{
"alpha_fraction": 0.41046831011772156,
"alphanum_fraction": 0.586776852607727,
"avg_line_length": 18.105262756347656,
"blob_id": "ce7fdbdd25e0fd042ff0f6ff10f357f71b20886b",
"content_id": "f98c6ccabb066dd7682346b9e984711f6543b859",
"detected_licenses": [
"MIT"
],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 363,
"license_type": "permissive",
"max_line_length": 58,
"num_lines": 19,
"path": "/examples/projectionTest.py",
"repo_name": "rraid/navi-v2",
"src_encoding": "UTF-8",
"text": "import sys\nsys.path.append(\"../perception/\")\nfrom projectedMesh import project3DMeshTo2DGrid\nimport numpy as np\n\ntriangles = np.array([\n [0, 1, 2],\n [3, 1, 2],\n [3, 0, 2],\n [1, 0, 4]])\n\nvertices = [\n (100, 100, 100),\n (100, 200, 300),\n (200, 100, 50),\n (200, 200, 200),\n (50, 50, 0)]\n\nproject3DMeshTo2DGrid(vertices, triangles, (100.0, 100.0))\n"
},
{
"alpha_fraction": 0.6653671264648438,
"alphanum_fraction": 0.6900584697723389,
"avg_line_length": 26.48214340209961,
"blob_id": "14d7b4a2892dd86c65d0eeddbf64ae656899ca61",
"content_id": "4f328ce87b0ab6084978e7d9ab79b1a40fc6589a",
"detected_licenses": [
"MIT"
],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 1539,
"license_type": "permissive",
"max_line_length": 84,
"num_lines": 56,
"path": "/device/zed.py",
"repo_name": "rraid/navi-v2",
"src_encoding": "UTF-8",
"text": "import ctypes, os\nimport numpy as np\nimport cv2\n\npath = \"/home/nvidia/navi-v2/device/build/libzed.so\"\nlibzed = ctypes.cdll.LoadLibrary(path)\nlibzed.zed_open.resType = ctypes.c_bool\nlibzed.zed_close.resType = None\nlibzed.zed_run.resType = None\nlibzed.grabDepthFrame.resType = ctypes.c_bool\nlibzed.grabDepthFrame.argTypes = [ctypes.c_void_p]\nlibzed.grabFrame.resType = ctypes.c_bool\nlibzed.grabFrame.argTypes = [ctypes.c_void_p]\nlibzed.getPose.resType = ctypes.c_bool\nlibzed.getPose.argTypes = [ctypes.c_void_p]\n\ndef open():\n return libzed.zed_open()\n\ndef close():\n libzed.zed_close()\n\ndef run():\n libzed.zed_run()\n \ndef grabDepthFrame():\n image_pointer = ctypes.cast((ctypes.c_float * 720 * 1280)(), \\\n ctypes.POINTER(ctypes.c_float))\n res = libzed.grabDepthFrame(image_pointer)\n if res == False:\n return None\n else:\n image = np.ctypeslib.as_array(image_pointer, shape=(720, 1280))\n image[np.isnan(image)] = 0.0\n image[np.isinf(image)] = 0.0\n return image\n \ndef getPose():\n pose_pointer = ctypes.cast((ctypes.c_float * 6)(), ctypes.POINTER(ctypes.c_float))\n res = libzed.getPose(pose_pointer)\n if res == False:\n return None\n else:\n pose = np.ctypeslib.as_array(pose_pointer, shape = (1,6))\n return pose[0,:]\n\n\ndef grabFrame():\n image_pointer = ctypes.cast((ctypes.c_float * 720 * 1280)(), \\\n ctypes.POINTER(ctypes.c_float))\n res = libzed.grabFrame(image_pointer)\n if res == False:\n return None\n else:\n image = np.ctypeslib.as_array(image_pointer, shape=(720, 1280))\n return image\n"
},
{
"alpha_fraction": 0.6742857098579407,
"alphanum_fraction": 0.691428542137146,
"avg_line_length": 10.666666984558105,
"blob_id": "d82c5579ec1f8f3ec3616f933fdb5ae91b411994",
"content_id": "08a3c2d8042cbbc05353c7cc312ee8ddbe7dc9ce",
"detected_licenses": [
"MIT"
],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 175,
"license_type": "permissive",
"max_line_length": 29,
"num_lines": 15,
"path": "/examples/testZedPose.py",
"repo_name": "rraid/navi-v2",
"src_encoding": "UTF-8",
"text": "import sys\nsys.path.append(\"../device/\")\nimport zed\nimport numpy as np\nimport cv2\nimport time\n\nzed.open()\n\nwhile True:\n print zed.getPose()\n time.sleep(.01)\n \n\nzed.close()\n"
},
{
"alpha_fraction": 0.6779388189315796,
"alphanum_fraction": 0.6892109513282776,
"avg_line_length": 22.846153259277344,
"blob_id": "31660f1c9dbe1449abb667ed1e63e94c47b01248",
"content_id": "d644ff58fcefb160b55d1637bc44a6c5979a49d6",
"detected_licenses": [
"MIT"
],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 621,
"license_type": "permissive",
"max_line_length": 49,
"num_lines": 26,
"path": "/examples/testDevices.py",
"repo_name": "rraid/navi-v2",
"src_encoding": "UTF-8",
"text": "import sys\nsys.path.append(\"../device/\")\nimport devhub\nimport signal\nimport time\n\ntestMotors = False\nstopTest = False\n\ndef stopsigHandler(signo, frame):\n stopTest = True\n devhub.stop()\n sys.exit(0)\n\nif __name__ == \"__main__\":\n signal.signal(signal.SIGINT, stopsigHandler)\n print \"Press Ctrl+C to stop\"\n devhub.init()\n while not stopTest:\n print \"Zed:\", devhub.getZedReadings()\n print \"GPS:\", devhub.getGPSReadings()\n print \"Heading\", devhub.getCompassReadings()\n time.sleep(0.1) # 10 MHz refresh\n #print \"Buttons:\", devhub.getButtonReadings()\n if testMotors:\n devhub.setMotorVelocity(0, 0)\n\n"
},
{
"alpha_fraction": 0.7234042286872864,
"alphanum_fraction": 0.7446808218955994,
"avg_line_length": 22.5,
"blob_id": "520f5222e84fdfe19e683f135d592c7d2b55070e",
"content_id": "f6eb19f192092f397517f371aff11a2d6b24b8b1",
"detected_licenses": [
"MIT"
],
"is_generated": false,
"is_vendor": false,
"language": "Markdown",
"length_bytes": 47,
"license_type": "permissive",
"max_line_length": 36,
"num_lines": 2,
"path": "/README.md",
"repo_name": "rraid/navi-v2",
"src_encoding": "UTF-8",
"text": "# navi-v2\nThe second version of the navi code.\n"
},
{
"alpha_fraction": 0.7755101919174194,
"alphanum_fraction": 0.7755101919174194,
"avg_line_length": 27,
"blob_id": "da7fdcec88efbea4eb9e907b8690de3580167f05",
"content_id": "3fff45f922402b328c3239c763713b14f233553f",
"detected_licenses": [
"MIT"
],
"is_generated": false,
"is_vendor": false,
"language": "Markdown",
"length_bytes": 392,
"license_type": "permissive",
"max_line_length": 78,
"num_lines": 14,
"path": "/perception/README.md",
"repo_name": "rraid/navi-v2",
"src_encoding": "UTF-8",
"text": "To use the alternate chili library for python, do the following:\n\n```bash\nmake\nsudo ldconfig\n```\n\nNow you should have `libchili.so` in the working directory. There currently is\nno option to install it globally, since its a hack, but you can certainly make\none if you wish to do so.\n\n\nWebsite used to find distances between coordinate points\nhttp://www.movable-type.co.uk/scripts/latlong.html\n"
},
{
"alpha_fraction": 0.7271474599838257,
"alphanum_fraction": 0.7404685616493225,
"avg_line_length": 30.100000381469727,
"blob_id": "7eb7b183c9c412b972a7ee28ab693db96436f7a6",
"content_id": "0d6c86d88476a7f71f007450504f20478deac80d",
"detected_licenses": [
"MIT"
],
"is_generated": false,
"is_vendor": false,
"language": "CMake",
"length_bytes": 2177,
"license_type": "permissive",
"max_line_length": 173,
"num_lines": 70,
"path": "/device/CMakeLists.txt",
"repo_name": "rraid/navi-v2",
"src_encoding": "UTF-8",
"text": "SET(execName zed)\n\nCMAKE_MINIMUM_REQUIRED(VERSION 2.4)\n\nif(COMMAND cmake_policy)\n cmake_policy(SET CMP0003 OLD)\n cmake_policy(SET CMP0015 OLD)\nendif(COMMAND cmake_policy)\n\nSET(EXECUTABLE_OUTPUT_PATH \".\")\n\nSET(SPECIAL_OS_LIBS \"\")\nSET(VERSION_REQ_CUDA \"8.0\")\n\nfind_package(ZED 2 REQUIRED)\nfind_package(GLUT REQUIRED)\nfind_package(GLEW REQUIRED)\nfind_package(OpenGL REQUIRED)\n\n##For Jetson, OpenCV4Tegra is based on OpenCV2.4\nexec_program(uname ARGS -p OUTPUT_VARIABLE CMAKE_SYSTEM_NAME2)\nif ( CMAKE_SYSTEM_NAME2 MATCHES \"aarch64\" )\n SET(VERSION_REQ_CUDA \"\")\n link_directories(\"/usr/lib/aarch64-linux-gnu/tegra\") ## on Jetson TX1 64bits, the correct libGL.so is located here (the default one will lead to linking error (undef ref))\n\n unset(CUDA_USE_STATIC_CUDA_RUNTIME CACHE)\n option(CUDA_USE_STATIC_CUDA_RUNTIME OFF)\n message (\"CUDA_USE_STATIC_CUDA_RUNTIME : ${CUDA_USE_STATIC_CUDA_RUNTIME}\")\n ##to prevent from opencv_dep_cudart dependencies error...\n ## cmake with -DCUDA_USE_STATIC_CUDA_RUNTIME=false can also be called.\nendif()\n\nSET(SPECIAL_OS_LIBS \"pthread\" \"X11\")\nadd_definitions(-Wno-write-strings)\n\n\nfind_package(CUDA ${VERSION_REQ_CUDA} REQUIRED)\n\ninclude_directories(${ZED_INCLUDE_DIRS})\ninclude_directories(${GLEW_INCLUDE_DIRS})\ninclude_directories(${GLUT_INCLUDE_DIRS})\ninclude_directories(${CUDA_INCLUDE_DIRS})\ninclude_directories(${CMAKE_CURRENT_SOURCE_DIR}/include)\n\nlink_directories(${ZED_LIBRARY_DIR})\nlink_directories(${GLEW_LIBRARY_DIRS})\nlink_directories(${GLUT_LIBRARY_DIRS})\nlink_directories(${OpenGL_LIBRARY_DIRS})\nlink_directories(${CUDA_LIBRARY_DIRS})\nlink_directories(${CMAKE_CURRENT_SOURCE_DIR}/lib)\n\nSET(SRC_FOLDER src)\nFILE(GLOB_RECURSE SRC_FILES \"${SRC_FOLDER}/*.cpp\")\n\nSET(HEADER_FOLDER include)\nFILE(GLOB_RECURSE HEADER_FILES \"${HEADER_FOLDER}/*.hpp\" \"${HEADER_FOLDER}/*.h\")\n\nADD_LIBRARY(${execName} SHARED ${SRC_FILES} ${HEADER_FILES})\nadd_definitions(-std=c++0x -g -O3)\n\n# Add the required libraries for linking:\nTARGET_LINK_LIBRARIES(${execName}\n ${SPECIAL_OS_LIBS}\n ${ZED_LIBRARIES}\n ${OPENGL_LIBRARIES}\n ${GLUT_LIBRARY}\n ${GLEW_LIBRARY}\n ${CUDA_CUDA_LIBRARY} ${CUDA_CUDART_LIBRARY} ${CUDA_npp_LIBRARY}\n ${SPECIAL_OS_LIBS}\n )\n"
}
] | 18 |
jjaskirat-ssingh/DIOSA
|
https://github.com/jjaskirat-ssingh/DIOSA
|
5e4d5067840da6920adac162738e6f116e4f88a5
|
a789d6e7baf13b9b47a2ef30a5b501fae832a49a
|
a7e25e4fa1fa2548f3104c4a88fdcfb4ebaa5715
|
refs/heads/main
| 2023-06-14T00:25:58.622248 | 2022-01-11T19:00:10 | 2022-01-11T19:00:10 | 367,931,780 | 0 | 0 | null | null | null | null | null |
[
{
"alpha_fraction": 0.6352941393852234,
"alphanum_fraction": 0.6352941393852234,
"avg_line_length": 20.375,
"blob_id": "cf8fbe45e357d9b7c6f1c47b54703eca52d1be65",
"content_id": "c917f7f1225d5f492cccaff13bb6459396746832",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 170,
"license_type": "no_license",
"max_line_length": 61,
"num_lines": 8,
"path": "/team/urls.py",
"repo_name": "jjaskirat-ssingh/DIOSA",
"src_encoding": "UTF-8",
"text": "from django.urls import path\n\nfrom . import views\n\nurlpatterns = [\n path('', views.index, name='team'),\n path('<int:member_id>', views.member, name='member'), \n]"
},
{
"alpha_fraction": 0.6423982977867126,
"alphanum_fraction": 0.6552462577819824,
"avg_line_length": 20.136363983154297,
"blob_id": "12b27436f15b7bb4e3c68a17be9d67dc6ccceb9e",
"content_id": "86ef72e97f17bd5a2cc1b7c4860e82432139fd43",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 467,
"license_type": "no_license",
"max_line_length": 70,
"num_lines": 22,
"path": "/team/views.py",
"repo_name": "jjaskirat-ssingh/DIOSA",
"src_encoding": "UTF-8",
"text": "from django.shortcuts import get_object_or_404, render\n\n\nfrom .models import Member\n\ndef index(request):\n team = Member.objects.order_by('pass_year').filter(in_office=True)\n\n context = {\n 'team': team\n }\n\n return render(request, 'pages/index.html', context) \n\ndef Member(request, Member_id):\n Member = get_object_or_404(Member, pk=Member_id)\n\n context = {\n 'Member': Member\n }\n\n return render(request, 'pages/index.html', context) \n\n"
},
{
"alpha_fraction": 0.5051020383834839,
"alphanum_fraction": 0.8051020503044128,
"avg_line_length": 50.578948974609375,
"blob_id": "f4a6d8b54a3b210b5281e5b56c6047dc52190496",
"content_id": "27be0d9ae7d279f521a36fb87c88dc3d3606824e",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Markdown",
"length_bytes": 980,
"license_type": "no_license",
"max_line_length": 107,
"num_lines": 19,
"path": "/README.md",
"repo_name": "jjaskirat-ssingh/DIOSA",
"src_encoding": "UTF-8",
"text": "# DIOSA - Doon International Old Students Association \n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\nFrontend theme credits - https://bootstrapmade.com\n"
},
{
"alpha_fraction": 0.645952582359314,
"alphanum_fraction": 0.6598528027534485,
"avg_line_length": 47.959999084472656,
"blob_id": "37de7bbc11bb881637f1b8fbc89698ac6ed9268c",
"content_id": "7be90d713da5e424ced84ad51d1f3b3cc8c90dfe",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 1223,
"license_type": "no_license",
"max_line_length": 76,
"num_lines": 25,
"path": "/events/models.py",
"repo_name": "jjaskirat-ssingh/DIOSA",
"src_encoding": "UTF-8",
"text": "from django.db import models\nfrom datetime import datetime\n\n\n# Create your models here.\n\nclass Event(models.Model):\n title = models.CharField(max_length=200)\n venue = models.CharField(max_length=200)\n description = models.TextField(blank=True)\n event_date = models.DateField(blank=True)\n photo_main = models.ImageField(upload_to = 'photos/%Y/%m/%d/')\n photo_1 = models.ImageField(upload_to = 'photos/%Y/%m/%d/', blank=True)\n photo_2 = models.ImageField(upload_to = 'photos/%Y/%m/%d/', blank=True)\n photo_3 = models.ImageField(upload_to = 'photos/%Y/%m/%d/', blank=True)\n photo_4 = models.ImageField(upload_to = 'photos/%Y/%m/%d/', blank=True)\n photo_5 = models.ImageField(upload_to = 'photos/%Y/%m/%d/', blank=True)\n photo_6 = models.ImageField(upload_to = 'photos/%Y/%m/%d/', blank=True)\n photo_7 = models.ImageField(upload_to = 'photos/%Y/%m/%d/', blank=True)\n photo_8 = models.ImageField(upload_to = 'photos/%Y/%m/%d/', blank=True)\n photo_9 = models.ImageField(upload_to = 'photos/%Y/%m/%d/', blank=True)\n photo_10 = models.ImageField(upload_to = 'photos/%Y/%m/%d/', blank=True)\n display = models.BooleanField(default=True)\n def __str__(self):\n return self.title"
},
{
"alpha_fraction": 0.6889401078224182,
"alphanum_fraction": 0.6889401078224182,
"avg_line_length": 23.11111068725586,
"blob_id": "fdc99d0770c589af931d4eeaeb8c8f35d799e1b2",
"content_id": "30fbc2916b2d34b4b841fff1740aee58bd29e0a9",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 434,
"license_type": "no_license",
"max_line_length": 57,
"num_lines": 18,
"path": "/pages/views.py",
"repo_name": "jjaskirat-ssingh/DIOSA",
"src_encoding": "UTF-8",
"text": "from django.shortcuts import render\n\nfrom team.models import Member\nfrom events.models import Event\n\ndef index(request):\n members = Member.objects.all().filter(in_office=True)\n events = Event.objects.all().filter(display=True)\n\n context = {\n 'members': members,\n 'events': events\n }\n\n return render(request, 'pages/index.html', context)\n\ndef about(request):\n return render(request, 'pages/about.html')\n"
},
{
"alpha_fraction": 0.6818181872367859,
"alphanum_fraction": 0.7045454382896423,
"avg_line_length": 37.5,
"blob_id": "12d487d7669130e247f5b2d37d814f25b9cc3080",
"content_id": "1d631520cfa259ac9227ba6e1f908e894eb7ad96",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 616,
"license_type": "no_license",
"max_line_length": 60,
"num_lines": 16,
"path": "/team/models.py",
"repo_name": "jjaskirat-ssingh/DIOSA",
"src_encoding": "UTF-8",
"text": "from django.db import models\n\n\n# Create your models here.\nclass Member(models.Model):\n name = models.CharField(max_length=200)\n photo = models.ImageField(upload_to='photos/%Y/%m/%d/')\n description = models.TextField(blank=True)\n contact = models.CharField(max_length=30, blank=True)\n email = models.CharField(max_length=200, blank=True)\n is_alum = models.BooleanField(default=True)\n in_office = models.BooleanField(default=False)\n post = models.CharField(max_length=200, blank=True)\n pass_year = models.CharField(max_length=200, blank=True)\n def __str__(self):\n return self.name\n"
},
{
"alpha_fraction": 0.5258570313453674,
"alphanum_fraction": 0.5444508790969849,
"avg_line_length": 46.80555725097656,
"blob_id": "2f2776d6d4c59ccd94721be8e334f0c9448a65b3",
"content_id": "64be42c0eae23aa6ed4ddebd0e2d37a2f3ccaba4",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 1721,
"license_type": "no_license",
"max_line_length": 117,
"num_lines": 36,
"path": "/events/migrations/0001_initial.py",
"repo_name": "jjaskirat-ssingh/DIOSA",
"src_encoding": "UTF-8",
"text": "# Generated by Django 3.2.3 on 2021-05-17 08:00\n\nfrom django.db import migrations, models\n\n\nclass Migration(migrations.Migration):\n\n initial = True\n\n dependencies = [\n ]\n\n operations = [\n migrations.CreateModel(\n name='Event',\n fields=[\n ('id', models.BigAutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),\n ('title', models.CharField(max_length=200)),\n ('venue', models.CharField(max_length=200)),\n ('description', models.TextField(blank=True)),\n ('event_date', models.DateField(blank=True)),\n ('photo_main', models.ImageField(upload_to='photos/%Y/%m/%d/')),\n ('photo_1', models.ImageField(blank=True, upload_to='photos/%Y/%m/%d/')),\n ('photo_2', models.ImageField(blank=True, upload_to='photos/%Y/%m/%d/')),\n ('photo_3', models.ImageField(blank=True, upload_to='photos/%Y/%m/%d/')),\n ('photo_4', models.ImageField(blank=True, upload_to='photos/%Y/%m/%d/')),\n ('photo_5', models.ImageField(blank=True, upload_to='photos/%Y/%m/%d/')),\n ('photo_6', models.ImageField(blank=True, upload_to='photos/%Y/%m/%d/')),\n ('photo_7', models.ImageField(blank=True, upload_to='photos/%Y/%m/%d/')),\n ('photo_8', models.ImageField(blank=True, upload_to='photos/%Y/%m/%d/')),\n ('photo_9', models.ImageField(blank=True, upload_to='photos/%Y/%m/%d/')),\n ('photo_10', models.ImageField(blank=True, upload_to='photos/%Y/%m/%d/')),\n ('display', models.BooleanField(default=True)),\n ],\n ),\n ]\n"
},
{
"alpha_fraction": 0.6340425610542297,
"alphanum_fraction": 0.6468085050582886,
"avg_line_length": 20.363636016845703,
"blob_id": "97d73a87e18290511225b23abf3927bc48a2f88c",
"content_id": "416ff463c852da7105531ccc5b4d7b752310c132",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 470,
"license_type": "no_license",
"max_line_length": 72,
"num_lines": 22,
"path": "/events/views.py",
"repo_name": "jjaskirat-ssingh/DIOSA",
"src_encoding": "UTF-8",
"text": "from django.shortcuts import get_object_or_404, render\n\nfrom .models import Event\n\ndef index(request):\n events = Event.objects.order_by('-event_date').filter(display=True)\n\n \n context = {\n 'events': events\n }\n\n return render(request, 'pages/index.html', context) \n\ndef event(request, event_id):\n event = get_object_or_404(Event, pk=event_id)\n\n context = {\n 'event': event\n }\n\n return render(request, 'events/event.html', context) "
}
] | 8 |
sgryjp/hm-font
|
https://github.com/sgryjp/hm-font
|
c9a1e69ed599c9221cf4c9daa17c8b2540e09df8
|
dfeb7348938dfebbd98e6d4dcc4b5a2ac4e7279a
|
c2d0ccfd4de0ab30055d5c787d3930e038af7bf3
|
refs/heads/master
| 2023-01-28T11:06:43.125792 | 2020-12-12T14:19:37 | 2020-12-12T14:19:37 | 277,134,912 | 1 | 1 |
NOASSERTION
| 2020-07-04T15:20:27 | 2020-07-05T08:45:43 | 2020-07-05T08:54:21 |
Python
|
[
{
"alpha_fraction": 0.7300000190734863,
"alphanum_fraction": 0.7350000143051147,
"avg_line_length": 21.22222137451172,
"blob_id": "f713c89d14057fa17be29f410b0a2e38908f7e65",
"content_id": "5b2e63627ac78fc1451b6b69211329dfb86174fc",
"detected_licenses": [
"mplus",
"MIT",
"Bitstream-Vera",
"LicenseRef-scancode-public-domain"
],
"is_generated": false,
"is_vendor": false,
"language": "Markdown",
"length_bytes": 324,
"license_type": "permissive",
"max_line_length": 60,
"num_lines": 9,
"path": "/README.md",
"repo_name": "sgryjp/hm-font",
"src_encoding": "UTF-8",
"text": "# HM Font\n\n「日本語文字も表示できる Hack」がコンセプトの日本語フォントです。\n[Hack](https://sourcefoundry.org/hack/) をベースに、Hack に含まれない文字を\n[M+ 1m](https://mplus-fonts.osdn.jp/) のグリフで補っています。\n\n## 画面表示例\n\n\n"
},
{
"alpha_fraction": 0.6679999828338623,
"alphanum_fraction": 0.671999990940094,
"avg_line_length": 26.77777862548828,
"blob_id": "6a60243505b2467e48723009e4130da8102d97c5",
"content_id": "0158a46fc5c32c4d94bbef8a02fd6e83966e34e1",
"detected_licenses": [
"mplus",
"MIT",
"Bitstream-Vera",
"LicenseRef-scancode-public-domain"
],
"is_generated": false,
"is_vendor": false,
"language": "Shell",
"length_bytes": 250,
"license_type": "permissive",
"max_line_length": 58,
"num_lines": 9,
"path": "/build.sh",
"repo_name": "sgryjp/hm-font",
"src_encoding": "UTF-8",
"text": "#!/bin/sh\nprojdir=$(git rev-parse --show-toplevel)\nversion=$(grep -E '^__version__' build.py | cut -d\\\" -f 2)\n\nbuild_opts=\"-t hmfont:$version\"\ndocker build $build_opts .\n\nrun_opts=\"-v $projdir/outputs:/outputs\"\ndocker run $run_opts \"hmfont:$version\"\n"
},
{
"alpha_fraction": 0.5686299800872803,
"alphanum_fraction": 0.5887356400489807,
"avg_line_length": 36.48309326171875,
"blob_id": "87dd45050c00ff37af0e65ae207a0d6434e4c4c5",
"content_id": "1df9fe2bbc8ba5fe0dd5ef8d02abd13de7981e5c",
"detected_licenses": [
"mplus",
"MIT",
"Bitstream-Vera",
"LicenseRef-scancode-public-domain"
],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 8501,
"license_type": "permissive",
"max_line_length": 79,
"num_lines": 207,
"path": "/build.py",
"repo_name": "sgryjp/hm-font",
"src_encoding": "UTF-8",
"text": "import logging\nfrom pathlib import Path\n\nimport fontforge as ff\nimport psMat\n\n__version__ = \"20.12.12\"\n__copyright__ = \"Copyright (c) 2020, Suguru Yamamoto\"\n_logger = logging.getLogger(__name__)\n\n\ndef bits_repr(*args) -> str:\n tokens = []\n for arg in args:\n if 0 <= arg:\n token = \"{:032b}\".format(arg)\n else:\n token = \"{:032b}\".format(0x1_0000_0000 + arg)\n tokens += [\n token[i * 8:(i * 8) + 8] for i in range(4)]\n return \" \".join(tokens)\n\n\ndef build(fontdir: Path, outdir: Path, bold=False):\n # フォントをロード\n hack_path = fontdir / f\"Hack-{'Bold' if bold else 'Regular'}.ttf\"\n mplus_path = fontdir / f\"mplus-1m-{'bold' if bold else 'regular'}.ttf\"\n hack: ff.font = ff.open(str(hack_path))\n mplus: ff.font = ff.open(str(mplus_path))\n\n # フォントのサイズ調整用に「M」の字でサイズ比と差を計算。\n # ただし Hack と M+1M は文字の縦横比が違うため、単純な拡縮ではマッチしない。\n # ここでは、まず高さを一致させ、横幅の拡大率を高さの 1.1 倍を超えない程度に\n # 抑え、かつ不足する横幅を記録しておく(これを元にパディング量を調整し、\n # いわゆる全角文字の幅が英数字のちょうど 2 倍になるようにする)\n vert_ratio = hack[0x4d].vwidth / mplus[0x4d].vwidth\n horiz_ratio = min(hack[0x4d].width / mplus[0x4d].width, vert_ratio * 1.1)\n horiz_pad = int(hack[0x4d].width - mplus[0x4d].width * horiz_ratio)\n scaling_ratio = psMat.scale(horiz_ratio, vert_ratio)\n _logger.info(\"scaling ratio: %s\", scaling_ratio)\n _logger.info(\"horizontal padding: %s\", horiz_pad)\n\n # Hack に無く M+ にあるコードポイントを列挙する\n font_code_points = set(g.encoding for g in hack.glyphs())\n mplus_code_points = set(g.encoding for g in mplus.glyphs())\n for code_point in mplus_code_points - font_code_points:\n # BMP の外にある文字は無視する\n if 0xffff < code_point:\n continue\n\n try:\n # この M+ のグリフを Hack に合うよう拡縮とパディング挿入を行う\n g = mplus[code_point]\n g.transform(scaling_ratio)\n g.left_side_bearing = int(g.left_side_bearing + horiz_pad / 2)\n g.right_side_bearing = int(g.right_side_bearing + horiz_pad / 2)\n g.width = int(g.width + horiz_pad)\n\n # このグリフを Hack の方にコピー\n mplus.selection.select(code_point)\n mplus.copy()\n hack.selection.select(code_point)\n hack.paste()\n except Exception as e:\n _logger.warning(\"Error on copying %s (%s): %s\",\n mplus[code_point], f\"u{code_point:x}\", e)\n\n # オリジナルから合成して引き継ぐべきメタデータを準備\n # 字体のデザイン特性はコンセプト上の主体である Hack のそれを踏襲する。\n # サポートする文字集合は、両フォントのそれの和集合とする。\n _logger.info(\"[os2_version]\")\n _logger.info(\"hack: %s\", hack.os2_version)\n _logger.info(\"mplus: %s\", mplus.os2_version)\n hack.os2_version = 1 # OS/2 table version number\n _logger.info(\"hm: %s\", hack.os2_version)\n _logger.info(\"[os2_weight]\")\n _logger.info(\"hack: %s\", hack.os2_weight)\n _logger.info(\"mplus: %s\", mplus.os2_weight)\n hack.os2_weight = 400 # Regular\n _logger.info(\"hm: %s\", hack.os2_weight)\n _logger.info(\"[os2_width]\")\n _logger.info(\"hack: %s\", hack.os2_width)\n _logger.info(\"mplus: %s\", mplus.os2_width)\n hack.os2_width = 5 # Medium (normal)\n _logger.info(\"hm: %s\", hack.os2_width)\n _logger.info(\"[os2_fstype]\")\n _logger.info(\"hack: %s\", hack.os2_fstype)\n _logger.info(\"mplus: %s\", mplus.os2_fstype)\n hack.os2_fstype = 0 # Installable Embedding\n _logger.info(\"hm: %s\", hack.os2_fstype)\n _logger.info(\"[os2_vendor]\")\n _logger.info(\"hack: %s\", hack.os2_vendor)\n _logger.info(\"mplus: %s\", mplus.os2_vendor)\n hack.os2_vendor = \"\"\n _logger.info(\"hm: %s\", hack.os2_vendor)\n # https://monotype.github.io/panose/pan2.htm\n _logger.info(\"[os2_panose]\")\n _logger.info(\"hack: %s\", hack.os2_panose)\n _logger.info(\"mplus: %s\", mplus.os2_panose)\n hack.os2_panose = (\n 2, # (Faimly Kind) Latin Text\n 11, # (Serif Style Classification) Normal Sans\n 6, # (Weight) Medium\n 9, # (Propotion) Monospaced\n 3, # (Contrast) Very Low\n 2, # (Stroke Variation) No Variation\n 2, # (Arm Style) Straight Arms/Horizontal\n 2, # (Letterform) Normal/Contact\n 2, # (Midline) Standard/Trimmed\n 4, # (X-height) Constant/Large\n )\n _logger.info(\"hm: %s\", hack.os2_panose)\n # https://docs.microsoft.com/en-us/typography/opentype/otspec140/os2ver1#ur\n _logger.info(\"[os2_unicoderanges]\")\n _logger.info(\"hack: %s\", bits_repr(*hack.os2_unicoderanges))\n _logger.info(\"mplus: %s\", bits_repr(*mplus.os2_unicoderanges))\n hack.os2_unicoderanges = tuple(\n h | m for h, m in zip(hack.os2_unicoderanges, mplus.os2_unicoderanges)\n )\n _logger.info(\"hm: %s\", bits_repr(*hack.os2_unicoderanges))\n _logger.info(\"[os2_os2_codepages]\")\n _logger.info(\"hack: %s\", bits_repr(*hack.os2_codepages))\n _logger.info(\"mplus: %s\", bits_repr(*mplus.os2_codepages))\n hack.os2_codepages = tuple(\n h | m for h, m in zip(hack.os2_codepages, mplus.os2_codepages)\n )\n _logger.info(\"hm: %s\", bits_repr(*hack.os2_codepages))\n\n # フォントのメタ情報を生成\n try:\n hack_version = hack.sfnt_names[5][2].split(\";\")[0].lower()\n except Exception as e:\n _logger.error(\"Failed to extrace Hack version: %s\\n\\n%s\",\n e, hack.sfnt_names)\n sys.exit(2)\n try:\n mplus_version = mplus.sfnt_names[5][2].lower()\n except Exception as e:\n _logger.error(\"Failed to extrace M+ version: %s\\n\\n%s\",\n e, mplus.sfnt_names)\n sys.exit(2)\n version_string = (\"Version {}; derivative of Hack {} and M+1m {}\"\n .format(__version__, hack_version, mplus_version))\n license_text = Path(\"LICENSE\").read_text()\n\n # フォントに設定\n family_name = \"HM\"\n subfamily_name = \"Bold\" if bold else \"Regular\"\n hack.fontname = f\"{family_name}-{subfamily_name}\"\n hack.familyname = family_name\n hack.fullname = f\"{family_name} {subfamily_name}\"\n locale = \"English (US)\"\n license_url = \"https://github.com/sgryjp/hm-font/blob/master/LICENSE\"\n meta = (\n __copyright__, # 0:Copyright\n family_name, # 1:Family\n subfamily_name, # 2:SubFamily\n f\"{family_name}-{subfamily_name}-{__version__}\", # 3:UniqueID\n hack.fullname, # 4:Fullname\n version_string, # 5:Version\n f\"{family_name}-{subfamily_name}\", # 6:PostScriptName\n \"\", # 7:Trademark\n \"\", # 8:Manufacturer\n \"\", # 9:Designer\n \"\", # 10:Descriptor\n \"\", # 11:Vendor URL\n \"\", # 12:Designer URL\n license_text, # 13:License\n license_url, # 14:License URL\n None, # 15:N/A\n family_name, # 16:Preferred Family\n subfamily_name, # 17:Preferred Styles\n )\n for i, value in enumerate(meta):\n if value is not None:\n hack.appendSFNTName(locale, i, value)\n\n # デバッグ用に設定したメタ情報を表示\n for _, k, v in hack.sfnt_names:\n if k != \"License\":\n _logger.debug(\"[%s]\", k)\n _logger.debug(\"%s\", v)\n\n # フォントを出力\n os.makedirs(outdir, exist_ok=True)\n hack.generate(str(outdir / f\"hm-{'bold' if bold else 'regular'}.ttf\"))\n\n\nif __name__ == \"__main__\":\n import os\n import sys\n from argparse import ArgumentParser\n\n parser = ArgumentParser()\n parser.add_argument(\"-i\", \"--input\", required=True, type=Path,\n help=\"path to read source font file\")\n parser.add_argument(\"-o\", \"--output\", required=True, type=Path,\n help=\"path to write the composed font file\")\n args = parser.parse_args()\n\n # ロガーを設定\n logging.basicConfig(format=\"[%(asctime)s] %(message)s\",\n level=logging.DEBUG)\n\n # フォントを生成\n build(Path(args.input), Path(args.output), False)\n build(Path(args.input), Path(args.output), True)\n"
},
{
"alpha_fraction": 0.6605042219161987,
"alphanum_fraction": 0.7042016983032227,
"avg_line_length": 36.1875,
"blob_id": "f6d8790e1ff257212b9627554227e20496ba8aa1",
"content_id": "d69e83d7835756b750be8f4989b64b1a5be95650",
"detected_licenses": [
"mplus",
"MIT",
"Bitstream-Vera",
"LicenseRef-scancode-public-domain"
],
"is_generated": false,
"is_vendor": false,
"language": "Dockerfile",
"length_bytes": 595,
"license_type": "permissive",
"max_line_length": 105,
"num_lines": 16,
"path": "/Dockerfile",
"repo_name": "sgryjp/hm-font",
"src_encoding": "UTF-8",
"text": "FROM ubuntu:focal\n\nRUN apt-get update && \\\n apt-get install -y curl python3-fontforge\n\nRUN mkdir -p /inputs\nRUN curl -fsSL https://github.com/source-foundry/Hack/releases/download/v3.003/Hack-v3.003-ttf.tar.xz | \\\n xz -d - | tar xv -C /inputs\nRUN curl -fsSL https://osdn.net/projects/mplus-fonts/downloads/62344/mplus-TESTFLIGHT-063a.tar.xz | \\\n xz -d - | tar xv -C /inputs && \\\n mv inputs/mplus-TESTFLIGHT-063a/mplus-1m-regular.ttf /inputs && \\\n mv inputs/mplus-TESTFLIGHT-063a/mplus-1m-bold.ttf /inputs\nCOPY LICENSE .\nCOPY build.py .\n\nCMD python3 build.py -i /inputs -o outputs\n"
}
] | 4 |
darsh22t/r-said
|
https://github.com/darsh22t/r-said
|
568a71e6b1d567a3be471bc4f840df93667d5f43
|
38f06fa60945fd3ab4d8da5b047e1184c1be3749
|
501880f8eb8bb027e56c346f6dbeb42e6b77e1a6
|
refs/heads/main
| 2023-08-04T20:28:33.101150 | 2021-10-07T15:55:19 | 2021-10-07T15:55:19 | 414,665,901 | 0 | 0 | null | null | null | null | null |
[
{
"alpha_fraction": 0.5203251838684082,
"alphanum_fraction": 0.6829268336296082,
"avg_line_length": 15.857142448425293,
"blob_id": "c53a8ab28101d4b3f9c7792e0a4e8bea45533a77",
"content_id": "5354b87c5be4badb1188d8039effcfd04f3d7325",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Text",
"length_bytes": 123,
"license_type": "no_license",
"max_line_length": 21,
"num_lines": 7,
"path": "/requirements.txt",
"repo_name": "darsh22t/r-said",
"src_encoding": "UTF-8",
"text": "requests==2.25.1\r\nrequests-file==1.5.1\r\nrequests-html==0.10.0\r\nFlask==2.0.1\r\nFlask-SSLify==0.1.5\r\nWerkzeug==2.0.1\r\ngunicorn"
},
{
"alpha_fraction": 0.5529841780662537,
"alphanum_fraction": 0.5968331098556519,
"avg_line_length": 26.379310607910156,
"blob_id": "39155c7f095c705c131a3c0c485d3762f4cca6cc",
"content_id": "9589834e46e8569573c21e764d19811a2293ee11",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 821,
"license_type": "no_license",
"max_line_length": 81,
"num_lines": 29,
"path": "/app.py",
"repo_name": "darsh22t/r-said",
"src_encoding": "UTF-8",
"text": "import requests\r\nfrom flask import Flask,redirect,make_response\r\n\r\n\r\n\r\napp = Flask(__name__)\r\n\r\n\r\[email protected]('/<name>')\r\ndef hello(name):\r\n\r\n sheet = requests.get('http://95.111.230.118/kisho/page/active_r.php?page=rr')\r\n link = sheet.text.split('\"')[1].split('\\/\\/')\r\n url = f'{link[0]}//{link[1]}'\r\n r = make_response(redirect(f\"{url}{name}\", code=301))\r\n r.headers.set('alt-svc', \"clear\")\r\n r.headers.set('cache-control', \"private, max-age=90\")\r\n r.headers.set('content-security-policy', \"referrer always;\")\r\n r.headers.set('referrer-policy', \"unsafe-url\")\r\n r.headers.set('server', \"nginx\")\r\n r.headers.set('via', \"1.1 google\")\r\n return r,301\r\n\r\n\r\n\r\n\r\nif __name__ == '__main__':\r\n # Bind to PORT if defined, otherwise default to 5000.\r\n app.run(host='0.0.0.0', port=int('5000'))"
}
] | 2 |
fredrikhl/screenshot
|
https://github.com/fredrikhl/screenshot
|
6477112e1fef6332ac7c0f81920cdcd13efa8892
|
b9476d7649c891e2916a1f690ec586222fb8dff7
|
c658e1c5184e3ed66d24f9e1fc5643546cda373a
|
refs/heads/master
| 2021-01-12T16:43:47.099018 | 2020-07-16T16:47:32 | 2020-07-16T16:47:32 | 71,440,533 | 1 | 0 | null | null | null | null | null |
[
{
"alpha_fraction": 0.6239157319068909,
"alphanum_fraction": 0.6239157319068909,
"avg_line_length": 25.459016799926758,
"blob_id": "1661c03b50605c9c8d1872d6f4b90ef2e0163d4d",
"content_id": "1ebff4218eb036f141962522be16f77dcb088a16",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Markdown",
"length_bytes": 1614,
"license_type": "no_license",
"max_line_length": 78,
"num_lines": 61,
"path": "/README.md",
"repo_name": "fredrikhl/screenshot",
"src_encoding": "UTF-8",
"text": "# screenshot\n\nThis scripts takes screenshots, and places them in a configured folder with a\npre-defined name syntax.\n\nWhen invoked, the user will be prompted to select a desktop area or a window.\n\n\n## Usage\n\n```bash\n$ python screenshot.py -h\nusage: screenshot.py [-h] [-w] [-b] [-d <dir>] [-p <str>] [-t <format>]\n [-f <file>]\n\nTake a screenshot using ImageMagick's `import', and X window selection.\n\noptional arguments:\n -h, --help show this help message and exit\n -w, --window Screenshot of a full window\n -b, --border Include window manager decorations in windowed\n screenshot\n -d <dir>, --dir <dir>\n Store screenshot(s) in <dir>\n -p <str>, --file-prefix <str>\n Prefix screenshot file with <str>\n -t <format>, --type <format>\n Set the screenshot file type to <format>\n -f <file>, --file <file>\n Ignore all other options and write directly to <file>\n```\n\n\n## Examples\n\nTake a windowed screenshot with window manager decorations:\n\n```bash\n$ python screenshot.py --window --border --file 'somewindow.png'\n```\n\nTake a screenshot of a desktop area, and place it in a folder\n\n```bash\n$ python screenshot.py --file-prefix area --type jpg --dir /tmp/\n```\n\n\n## Requirements\n\nThis script uses [[ImageMagick]] (`import`) to take screenshots, and [[X.Org]]\n`xwininfo` to select windows.\n\n\n## TODO\n\nReplace `import` and `xwininfo` subprocess calls with bindings to libraries.\n\n\n [ImageMagick]: http://www.imagemagick.org/\n [X.Org]: https://www.x.org/\n"
},
{
"alpha_fraction": 0.6125289797782898,
"alphanum_fraction": 0.613689124584198,
"avg_line_length": 22.94444465637207,
"blob_id": "9eceb6d30feb1618742dc0bf29fb04ed1a5da283",
"content_id": "f593965fa009bf087bbe3bf1329735728bda5ec4",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 862,
"license_type": "no_license",
"max_line_length": 69,
"num_lines": 36,
"path": "/setup.py",
"repo_name": "fredrikhl/screenshot",
"src_encoding": "UTF-8",
"text": "#!/usr/bin/env python\n# coding: utf-8\n\nfrom setuptools import setup\nfrom setuptools import find_packages\n\n\ndef get_packages():\n \"\"\" List of (sub)packages to install. \"\"\"\n return find_packages('.', include=('screenshot', ))\n\n\ndef read_textfile(filename):\n \"\"\" Get contents from a text file. \"\"\"\n with open(filename, 'rU') as fh:\n return fh.read()\n\n\ndef setup_package():\n \"\"\" build and run setup. \"\"\"\n\n setup(\n name='screenshot',\n description='Take screenshots with ImageMagick and xwininfo',\n long_description=read_textfile('README.md'),\n author='fredrikhl',\n url='https://github.com/fredrikhl/screenshot',\n use_scm_version=True,\n setup_requires=['setuptools_scm'],\n packages=get_packages(),\n scripts=['screenshot.py'],\n )\n\n\nif __name__ == \"__main__\":\n setup_package()\n"
},
{
"alpha_fraction": 0.5584025382995605,
"alphanum_fraction": 0.5607881546020508,
"avg_line_length": 27.87244987487793,
"blob_id": "12f9819160f93ab3f473192a75ee3443966209af",
"content_id": "e61fcd28c8397328a7fffd4dfb1bf34c84f18749",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 11318,
"license_type": "no_license",
"max_line_length": 79,
"num_lines": 392,
"path": "/screenshot.py",
"repo_name": "fredrikhl/screenshot",
"src_encoding": "UTF-8",
"text": "#!/usr/bin/env python\n# encoding: utf8\n\"\"\" Take a screenshot using ImageMagick's `import', and X window selection. \"\"\"\nfrom __future__ import unicode_literals, print_function, absolute_import\n\nimport argparse\nimport datetime\nimport logging\nimport os\nimport re\nimport subprocess\nimport io\nimport select\n\nlogger = logging.getLogger('screenshot')\n\n\nclass Setting(object):\n \"\"\" A setting descriptor with a default value. \"\"\"\n\n def __init__(self, name, default=None):\n \"\"\" Initialize with a default value, and a set of value limits. \"\"\"\n self.name = name\n self.default = default\n\n def __repr__(self):\n return ('{cls.__name__}({obj.name!r}, default={obj.default!r})'\n ).format(cls=type(self), obj=self)\n\n @property\n def attr(self):\n return '_{cls.__name__}__{obj.name}_value_0x{_id:02x}'.format(\n cls=type(self),\n obj=self,\n _id=id(self))\n\n def __get__(self, obj, cls=None):\n if obj is None:\n return self\n return getattr(obj, self.attr, self.default)\n\n def __set__(self, obj, value):\n setattr(obj, self.attr, value)\n\n def __delete__(self, obj):\n if hasattr(obj, self.attr):\n delattr(obj, self.attr)\n\n\nclass FileNamer(object):\n \"\"\" Settings container object. \"\"\"\n\n directory = Setting('directory', default='')\n namefmt = Setting('namefmt', default='screenshot_{number}_{datetime}')\n datefmt = Setting('datefmt', default='%Y-%m-%d_%H-%M')\n ext = Setting('ext', default='png')\n digits = Setting('digits', default=4)\n\n def __init__(self, **kwargs):\n cls = type(self)\n for key in kwargs:\n if hasattr(cls, key) and isinstance(getattr(cls, key), Setting):\n setattr(self, key, kwargs[key])\n\n def __iter__(self):\n def settings_generator():\n cls = type(self)\n for name in dir(cls):\n if isinstance(getattr(cls, name), Setting):\n yield name\n return settings_generator()\n\n def __repr__(self):\n data = dict((name, getattr(self, name))\n for name in self)\n args = ', '.join('{name}={value!r}'.format(name=name, value=data[name])\n for name in data)\n return '{cls.__name__}({attr})'.format(cls=type(self), attr=args)\n\n def format_datetime(self, d):\n return d.strftime(self.datefmt)\n\n def format_number(self, i):\n return '{number:0{padding:d}d}'.format(number=i, padding=self.digits)\n\n def format_basename(self, datetime, number):\n data = {\n 'datetime': datetime,\n 'number': number,\n }\n return self.namefmt.format(**data)\n\n def _find_max(self):\n path = os.path.abspath(self.directory)\n max_int_pattern = re.compile(\n '^' + self.format_basename('.*', '(?P<number>[0-9]+)') +\n '\\\\' + os.path.extsep + '.*$')\n logger.debug('pattern: %s', max_int_pattern.pattern)\n\n def _iter_matches():\n yield 0\n for f in os.listdir(path):\n match = max_int_pattern.match(f)\n if os.path.isfile(os.path.join(path, f)) and match:\n yield int(match.group('number'))\n\n return max(_iter_matches())\n\n def suggest_filename(self):\n \"\"\" Suggest a filename based on settings. \"\"\"\n basename = self.format_basename(\n self.format_datetime(datetime.datetime.now()),\n self.format_number(self._find_max() + 1))\n filename = os.path.extsep.join((basename, self.ext))\n logger.debug('filename: %r', filename)\n return os.path.join(self.directory, filename)\n\n def __call__(self, filename=None):\n \"\"\" Calculate the file name for the next screenshot. \"\"\"\n filename = self.suggest_filename()\n if os.path.exists(filename):\n raise ValueError(\"File exists: %s\" % filename)\n return filename\n\n\ndef run_command(*cmd):\n logger.debug('running %r', cmd)\n proc = subprocess.Popen(\n cmd,\n stdout=subprocess.PIPE,\n stderr=subprocess.PIPE,\n encoding='utf-8',\n )\n\n stdout = io.StringIO()\n stderr = io.StringIO()\n # process output\n log_io = {\n proc.stdout: (\n (lambda line: logger.debug('[%d] stdout: %s',\n proc.pid, line.rstrip())),\n (lambda line: stdout.write(line)),\n ),\n proc.stderr: (\n (lambda line: logger.warning('[%d] stderr: %s',\n proc.pid, line.rstrip())),\n (lambda line: stderr.write(line)),\n ),\n }\n\n while log_io:\n ready, _, _ = select.select(log_io.keys(), [], [])\n for fd in ready:\n handlers = log_io[fd]\n line = fd.readline()\n if line:\n for fn in handlers:\n fn(line)\n else:\n fd.close()\n del log_io[fd]\n\n # handle exit\n status = proc.wait()\n level = logging.DEBUG\n etype = 'exit'\n err = False\n\n if status != 0:\n level = logging.ERROR\n err = True\n if os.WIFSIGNALED(status):\n status = os.WTERMSIG(status)\n etype = 'signal'\n else:\n status = os.WSTOPSIG(status)\n\n logger.log(level, 'process pid=%r %s=%r', proc.pid, etype, status)\n if err:\n raise RuntimeError('command %r failed' % (cmd,))\n return stdout.getvalue(), stderr.getvalue()\n\n\ndef _fetch_window(*args):\n \"\"\" Wait for the user to select an X window. \"\"\"\n out, _ = run_command('xwininfo', *args)\n\n result = re.search(r'^xwininfo: Window id: (?P<window>0x[0-9A-Za-z]+) .*',\n out, re.MULTILINE)\n if not result:\n logger.error(\"xwininfo - no window id in output\")\n raise RuntimeError(\"unable to fetch window\")\n\n return result.group('window')\n\n\ndef fetch_window():\n return _fetch_window()\n\n\ndef verify_window_root():\n return _fetch_window('-root')\n\n\ndef verify_window_id(window_id):\n return _fetch_window('-id', '0x' + format(window_id, 'x'))\n\n\ndef verify_window_name(window_name):\n return _fetch_window('-name', str(window_name))\n\n\ndef take_screenshot(dest, window=None, frame=True):\n \"\"\" Take a screenshot with `import'. \"\"\"\n cmd = ['import', ]\n\n if window is not None:\n cmd.extend(['-window', str(window), ])\n if frame:\n cmd.append('-frame')\n\n cmd.append(dest)\n\n out, _ = run_command(*cmd)\n\n\ndef excepthook(exc, val, tb):\n \"\"\" Print type and value, raise SystemExit. \"\"\"\n raise SystemExit(\"%s: %s\" % (exc.__name__, str(val)))\n\n\ndef window_type(value):\n \"\"\" validate and convert a hex/oct/dec string -> int \"\"\"\n value = value.strip().lower()\n if value.startswith('0x'):\n value = int(value[2:], 16)\n elif value.startswith('0o'):\n value = int(value[2:], 8)\n else:\n value = int(value, 10)\n return value\n\n\ndef writable_dir(value):\n if not os.path.exists(value):\n raise ValueError(\"%r does not exist\" % (value, ))\n if not os.path.isdir(value):\n raise ValueError(\"%r is not a directory\" % (value, ))\n if not os.access(value, os.R_OK | os.X_OK):\n raise ValueError(\"%r is not writable\" % (value, ))\n return value\n\n\ndef datetime_format(s):\n # Just make sure it works\n res = datetime.datetime.now().strftime(s)\n if len(res) < 1:\n raise ValueError(\"datetime format %r results in empty string\" % (s, ))\n base = datetime.datetime.fromtimestamp(0).strftime(s)\n if res == base:\n raise ValueError(\"datetime format %r has no date or time components\" %\n (s, ))\n return s\n\n\nDEFAULT_DIRECTORY = '/tmp'\nDEFAULT_NAMEFMT = '{number}_{datetime}'\nDEFAULT_DATEFMT = '%Y-%m-%d_%H-%M'\nDEFAULT_EXT = 'png'\nVALID_EXT = set(('png', 'gif', 'jpg'))\n\n\ndef main(args=None):\n \"\"\" Script invoke. \"\"\"\n parser = argparse.ArgumentParser(description=__doc__)\n\n what_args = parser.add_argument_group('selection')\n window_args = what_args.add_mutually_exclusive_group()\n\n window_args.add_argument(\n '-w', '--window',\n dest='window_select',\n action='store_true',\n default=False,\n help='select a window to screenshot')\n window_args.add_argument(\n '-W', '--window-id',\n dest='window_id',\n type=window_type,\n help='take screenshot of window with ID %(metavar)s',\n metavar='<id>')\n window_args.add_argument(\n '-r', '--window-root',\n dest='window_root',\n action='store_true',\n default=False,\n help='take screenshot of the root window',\n )\n window_args.add_argument(\n '--window-name',\n dest='window_name',\n help='take screenshot of window with ID %(metavar)s',\n metavar='<name>',\n )\n\n what_args.add_argument(\n '-b', '--border',\n dest='border',\n action='store_true',\n default=False,\n help='include window manager decorations in windowed screenshot')\n\n output_args = parser.add_argument_group('output')\n output_args.add_argument(\n '-d', '--dir',\n dest='directory',\n type=writable_dir,\n default=DEFAULT_DIRECTORY,\n help='Store screenshot(s) in %(metavar)s (%(default)s)',\n metavar='<dir>',\n )\n output_args.add_argument(\n '--date-format',\n dest='datefmt',\n type=datetime_format,\n default=DEFAULT_DATEFMT,\n help='use format %(metavar)s for datetime (%(default)s)',\n metavar='<format>',\n )\n output_args.add_argument(\n '--format',\n dest='namefmt',\n default=DEFAULT_NAMEFMT,\n help='use format %(metavar)s for the filename (%(default)s)',\n metavar='<format>',\n )\n output_args.add_argument(\n '-t', '--type',\n dest='ext',\n choices=VALID_EXT,\n default=DEFAULT_EXT,\n help='Set the screenshot file type to %(metavar)s (%(default)s)',\n metavar='<ext>',\n )\n output_args.add_argument(\n '-f', '--file',\n dest='filename',\n help='ignore all other options and write directly to %(metavar)s',\n metavar='<file>',\n )\n\n logging.basicConfig(\n level=logging.DEBUG,\n format='%(levelName)s - %(name) - %(message)',\n )\n\n args = parser.parse_args(args)\n # sys.excepthook = excepthook\n\n namer = FileNamer(\n directory=args.directory,\n namefmt=args.namefmt,\n datefmt=args.datefmt,\n ext=args.ext\n )\n\n if args.filename:\n filename = args.filename\n else:\n filename = namer()\n\n if args.window_select:\n window = fetch_window()\n elif args.window_id:\n window = verify_window_id(args.window_id)\n elif args.window_name:\n window = verify_window_name(args.window_name)\n elif args.window_root:\n window = verify_window_root()\n else:\n window = None\n\n logger.debug('taking screenshot with settings: %r', namer)\n take_screenshot(filename, window=window, frame=args.border)\n logger.info('screenshot written to %r', filename)\n\n\nif __name__ == '__main__':\n logging.basicConfig(\n level=logging.INFO,\n format=\"%(levelname)s: %(message)s\")\n main()\n"
},
{
"alpha_fraction": 0.5,
"alphanum_fraction": 0.5189003348350525,
"avg_line_length": 26.714284896850586,
"blob_id": "1fa8ab67336eb53bc2ca12fd1f5c0add07178a64",
"content_id": "a83e435bb5eaa901ed56c3e4cecd8d8ebb057363",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Shell",
"length_bytes": 582,
"license_type": "no_license",
"max_line_length": 86,
"num_lines": 21,
"path": "/screencast.sh",
"repo_name": "fredrikhl/screenshot",
"src_encoding": "UTF-8",
"text": "#!/bin/bash\n\necho \"Click a window to capture\"\nxwininfo > \"/tmp/$$_xwinf\"\n\nWIDTH=\"$(grep Width \"/tmp/$$_xwinf\" | perl -pe \"s/\\D+(\\d+)\\D+/\\1/g\")\"\nHEIGHT=\"$(grep Height \"/tmp/$$_xwinf\" | perl -pe \"s/\\D+(\\d+)\\D+/\\1/g\")\"\nOFFX=\"$(grep 'Absolute upper-left X' \"/tmp/$$_xwinf\" | perl -pe \"s/\\D+(\\d+)\\D+/\\1/g\")\"\nOFFY=\"$(grep 'Absolute upper-left Y' \"/tmp/$$_xwinf\" | perl -pe \"s/\\D+(\\d+)\\D+/\\1/g\")\"\n\nrm -f \"/tmp/$$_xwinf\"\n\necho \"Run:\"\necho ffmpeg \\\n -f alsa \\\n -i default \\\n -f x11grab \\\n -r 15 \\\n -s \"${WIDTH}x${HEIGHT}\" \\\n -i \":0.0+$OFFX,$OFFY\" \\\n /tmp/screencast.mp4\n"
}
] | 4 |
woozi1122/pytorch-yolov
|
https://github.com/woozi1122/pytorch-yolov
|
63db0c34cee46ec6ee0d30997e9d7ade65df36b1
|
313dd25c297554d114d28727298ffe1386fd4a0a
|
22243831ccc94ff871720eb0f1a1f4ddeff8c393
|
refs/heads/master
| 2023-09-05T21:38:12.921171 | 2021-11-24T08:12:35 | 2021-11-24T08:12:35 | null | 0 | 0 | null | null | null | null | null |
[
{
"alpha_fraction": 0.521631121635437,
"alphanum_fraction": 0.5348789095878601,
"avg_line_length": 35.59848403930664,
"blob_id": "cb6ca8478194f822e8d744c64355272bed12b941",
"content_id": "df1b7191a8aae5f5c532053c50313d2e3ae647da",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 5107,
"license_type": "no_license",
"max_line_length": 133,
"num_lines": 132,
"path": "/valid.py",
"repo_name": "woozi1122/pytorch-yolov",
"src_encoding": "UTF-8",
"text": "import sys\nsys.path.append(\"./util\")\nimport numpy as np\nfrom torch import nn\nimport torch\nfrom util.const import CONST\nfrom torch.utils.data import DataLoader\nfrom tqdm import tqdm\nfrom util.dataload import YOLODataSet, yolo_dataset_collate\nfrom terminaltables import AsciiTable\nfrom util.model import MyYOLO, getWeight,initialParam\nfrom util.utils import handleBox,notMaxSuppression,iou\n\n# 获取正确的框框\ndef getTrueBox(outputs, bboxes):\n res = []\n for i,output in enumerate(outputs):\n # 对于一张图\n if output is None: # 没有预测框就跳过\n continue\n preBoxes = output[:,:4]\n preLabels = output[:,6]\n preConf = output[:,4] * output[:,5]\n targetBboxes = bboxes[bboxes[:,0] == i]\n targetBoxes = targetBboxes[:,2:]\n targetLabels = targetBboxes[:,1]\n detectedBox = []\n isCor = torch.zeros_like(preLabels)\n for j, preBox in enumerate(preBoxes):\n # 对于一个框\n if (len(detectedBox) == len(targetLabels)):\n break\n # print(iou(preBox, targetBoxes, isleftT2rightD=True))\n iout, maxI = torch.max(iou(preBox, targetBoxes, isleftT2rightD=True), 0)\n if iout > CONST.valIOUTher and maxI not in detectedBox and preLabels[j] == targetLabels[maxI]:\n isCor[j] = 1\n detectedBox.append(maxI)\n res.append([isCor, preConf, preLabels])\n return res\n\n#==================================#\n# 计算模型参数 \n#==================================#\ndef calMap(isCor, preConf, preLabels, targetLabels):\n sI = np.argsort(-preConf)\n isCor = isCor[sI]\n preConf = preConf[sI]\n preLabels = preLabels[sI]\n uClasses = np.unique(targetLabels)\n R = []\n P = []\n AP = []\n for oneCls in uClasses:\n sI = preLabels == oneCls\n isCorOneCls = isCor[sI]\n\n targetLabelsOneCls = targetLabels[targetLabels == oneCls]\n tarTrueC = targetLabelsOneCls.size # 目标框为该类的数量\n preTrueC = isCorOneCls.size # 预测框为该类的数量\n\n if preTrueC == 0:\n R.append(0)\n P.append(0)\n AP.append(0)\n continue\n tpC = isCorOneCls.cumsum()\n fpC = (1 - isCorOneCls).cumsum()\n\n r = tpC / tarTrueC\n p = tpC / (tpC + fpC)\n R.append(r[-1])\n P.append(p[-1])\n # 在前面添加是往前取矩形,在后面添加是让召回率可以达到1\n r = np.concatenate(([0.0], r, [1.0]))\n p = np.concatenate(([0.0], p, [0.0]))\n # 保证p单调递减\n for i in range(p.size - 1, 0, -1):\n p[i - 1] = max(p[i], p[i - 1])\n # 删除重复项\n i = np.where(r[1:] != r[:-1])[0]\n ap = np.sum((r[i+1] - r[i]) * p[i+1])\n AP.append(ap)\n return R,P,AP,uClasses\n \n#==================================#\n# show MP \n#==================================#\ndef showMap(R,P,AP,uClasses):\n res = [[\"class\",\"AP\", \"R\", \"P\"]]\n for i,_ in enumerate(uClasses):\n res.append([CONST.classes[int(uClasses[i])], \"%.4f\" % AP[i], \"%.4f\" % R[i], \"%.4f\" % P[i]])\n res.append([])\n res.append([\"MAP\", \"%.4f\" % np.average(AP)])\n print(AsciiTable(res).table)\n\n#==================================#\n# 验证 \n#==================================#\ndef valid():\n yolo = MyYOLO() # type: nn.Module\n getWeight(yolo)\n yolo.eval()\n yolo.to(CONST.device)\n valDataSet = YOLODataSet(train=False, type=\"coco\")\n valDataLoader = DataLoader(valDataSet, batch_size=CONST.batchSize, num_workers=CONST.num_workers, shuffle=False, pin_memory=True,\n drop_last=True,collate_fn=yolo_dataset_collate)\n corBox = []\n targetLabels = []\n with torch.no_grad():\n for imgs, bboxes in tqdm(valDataLoader, desc=\"Validating\"):\n imgs = imgs.to(CONST.device)\n bboxes = bboxes.to(CONST.device) # 输入的数据为[picNumber,cls,x,y,w,h]\n output = yolo(imgs)\n output = handleBox(output, yolo) # 处理先验框 返回的数据大小为(batchSize, 10647, 85)\n output = notMaxSuppression(output) # 非极大值抑制, 返回的数据为batchSize[x,y,x,y,conf,cls]\n # print(f\"抑制后的结果:{len(output), [x.shape if x is not None else None for x in output]}\")\n bboxes[:,2:] = torch.cat([bboxes[:,2:4] - bboxes[:,4:6] / 2, bboxes[:,2:4] + bboxes[:,4:6] / 2], 1) #转换为xyxy\n corBox.extend(getTrueBox(output, bboxes))\n targetLabels.append(bboxes[:,1])\n if len(corBox) == 0:\n print(\"没有任何输出\")\n exit()\n isCor, preConf, preLabels = [torch.cat(x, 0).cpu().numpy() for x in zip(*corBox)]\n targetLabels = torch.cat(targetLabels, 0).cpu().numpy()\n R,P,AP,uClasses = calMap(isCor, preConf, preLabels, targetLabels)\n showMap(R,P,AP,uClasses)\n\n#==================================#\n# 主函数 \n#==================================#\nif __name__=='__main__':\n valid()\n"
},
{
"alpha_fraction": 0.4568335711956024,
"alphanum_fraction": 0.5036535859107971,
"avg_line_length": 35.766170501708984,
"blob_id": "85af750928a14ea26793cd094c6b0c4b09ef4b8c",
"content_id": "d9ee825126606c94ff00923dd16e3bf26105c10f",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 7596,
"license_type": "no_license",
"max_line_length": 112,
"num_lines": 201,
"path": "/util/model.py",
"repo_name": "woozi1122/pytorch-yolov",
"src_encoding": "UTF-8",
"text": "\nfrom imgaug.augmentables import bbs\nimport numpy as np\nfrom torch import nn\nimport torch\nfrom torch.functional import Tensor\nfrom utils import *\nfrom const import *\nimport math\n#==================================#\n# 简单的卷积层 \n#==================================#\nclass Conv(nn.Module):\n def __init__(self, inputC, outputC, keralSize, stride = 1, padding = \"same\") -> None:\n super(Conv, self).__init__()\n self.m = nn.Sequential(\n nn.Conv2d(inputC, outputC, keralSize, stride, padding, bias=False),\n nn.BatchNorm2d(outputC),\n nn.LeakyReLU(0.1)\n )\n def forward(self, x):\n return self.m(x)\n#==================================#\n# 残差块 \n#==================================#\nclass Residual(nn.Module):\n def __init__(self, inputC) -> None:\n super(Residual, self).__init__()\n tempC = inputC // 2\n self.m = nn.Sequential(\n Conv(inputC, tempC, 1, 1, 0),\n Conv(tempC, inputC, 3, 1, 1)\n )\n def forward(self, x):\n return x + self.m(x)\n#==================================#\n# darknet53 \n#==================================#\nclass Darknet53(nn.Module):\n def __init__(self) -> None:\n super(Darknet53, self).__init__()\n # 定义darknet53的层数\n self.layoutNumber = [1, 2, 8, 8, 4]\n self.layerA = nn.Sequential(\n Conv(3, 32, 3, 1, 1),\n self.MultiResidual(32, 64, 1),\n self.MultiResidual(64, 128, 2),\n self.MultiResidual(128, 256, 8)\n )\n self.layerB = self.MultiResidual(256, 512, 8)\n self.layerC = self.MultiResidual(512, 1024, 4)\n\n # 进行权值初始化\n for m in self.modules():\n if isinstance(m, nn.Conv2d):\n n = m.kernel_size[0] * m.kernel_size[1] * m.out_channels\n m.weight.data.normal_(0, math.sqrt(2. / n))\n elif isinstance(m, nn.BatchNorm2d):\n m.weight.data.fill_(1)\n m.bias.data.zero_()\n \n def forward(self, x):\n out1 = self.layerA(x)\n out2 = self.layerB(out1)\n out3 = self.layerC(out2)\n return out1, out2, out3\n\n # 多层的残差网络\n def MultiResidual(self, inputC, outputC, count):\n t = [Conv(inputC, outputC, 3, 2, 1) if i == 0 else Residual(outputC) for i in range(count + 1)]\n return nn.Sequential(*t)\n\n#==================================#\n# convSet \n#==================================#\nclass convSet(nn.Module):\n def __init__(self, inputC, outputC, midC) -> None:\n super(convSet, self).__init__()\n self.m = nn.Sequential(\n Conv(inputC, outputC, 1),\n Conv(outputC, midC, 3),\n Conv(midC, outputC, 1),\n Conv(outputC, midC, 3),\n Conv(midC, outputC, 1),\n )\n def forward(self, x):\n return self.m(x)\n\n#==================================#\n# lastLayer \n#==================================#\nclass LastLayer(nn.Module):\n def __init__(self, inputC, outputC, anchor=None) -> None:\n super(LastLayer, self).__init__()\n self.grid = None\n self.anchor = np.array(anchor)\n self.anchorScaled = []\n self.stride = 1\n self.shape = None\n self.m = nn.Sequential(\n Conv(inputC, inputC * 2, 3),\n nn.Conv2d(inputC * 2, outputC, 1)\n )\n def forward(self, x):\n o = self.m(x)\n if self.grid is None:\n self._createGrid(o.shape)\n return o\n def _createGrid(self, shape):\n b,c,h,w = shape\n self.shape = (h, w)\n self.stride = CONST.inputShape[0] / h\n self.anchorScaled = torch.tensor(self.anchor / self.stride, device=CONST.device)\n grid = torch.ones((b,len(self.anchor),h,w,4),device=CONST.device)\n gridY, gridX = torch.meshgrid(torch.arange(h), torch.arange(w), indexing=\"ij\")\n grid[...,0] *= gridX.to(CONST.device).unsqueeze(0)\n grid[...,1] *= gridY.to(CONST.device).unsqueeze(0)\n grid[...,2] *= self.anchorScaled[:,0].view(1,len(self.anchor),1,1)\n grid[...,3] *= self.anchorScaled[:,1].view(1,len(self.anchor),1,1)\n self.grid = grid\n \n#==================================#\n# 定义yolo模型 \n#==================================#\nclass MyYOLO(nn.Module):\n def __init__(self) -> None:\n super(MyYOLO, self).__init__()\n # 得到 1024*13*13\n self.darknet53 = Darknet53()\n # 得到 512*13*13\n self.convSet1 = convSet(1024, 512, 1024)\n # 得到 256*26*26, 但是后面要和另一层的输出合起来,得到的应该是 (512+256)*26*26\n self.layerA = nn.Sequential(\n Conv(512, 256, 1),\n nn.Upsample(scale_factor=2, mode='nearest')\n )\n # 得到 256*26*26\n self.convSet2 = convSet(256 + 512, 256, 512)\n \n # 得到 128*52*52, 但是后面要和另一层的输出合起来,得到的应该是 (128+256)*52*52\n self.layerB = nn.Sequential(\n Conv(256, 128, 1),\n nn.Upsample(scale_factor=2, mode='nearest')\n )\n # 得到 256*26*26\n self.convSet3 = convSet(128 + 256, 128, 256)\n\n # 得到 75*13*13\n self.predict1 = LastLayer(512, CONST.anchorNumber * (5 + CONST.classNumber), anchor=CONST.anchor[0])\n # 得到 75*26*26\n self.predict2 = LastLayer(256, CONST.anchorNumber * (5 + CONST.classNumber), anchor=CONST.anchor[1])\n # 得到 75*52*52\n self.predict3= LastLayer(128, CONST.anchorNumber * (5 + CONST.classNumber), anchor=CONST.anchor[2])\n self.lastLayers = [self.predict1, self.predict2, self.predict3]\n def forward(self, x):\n x1, x2, x3 = self.darknet53(x)\n convOut1 = self.convSet1(x3)\n out1 = self.predict1(convOut1)\n layerOut = self.layerA(convOut1)\n additon = torch.cat([layerOut, x2], 1)\n convOut2 = self.convSet2(additon)\n out2 = self.predict2(convOut2)\n layerOut = self.layerB(convOut2)\n additon = torch.cat([layerOut, x1], 1)\n convOut3 = self.convSet3(additon)\n out3 = self.predict3(convOut3)\n return out1, out2, out3\n\n#==================================#\n# 迁移学习 \n#==================================#\ndef getWeight(yolo):\n yolo.apply(initialParam)\n weightData = torch.load(\"yolo_weights.pth\", map_location=\"cuda\")\n # yolo.load_state_dict(weightData)\n # return\n myWeightData = yolo.state_dict()\n keys = list(yolo.state_dict().keys())\n keys = np.concatenate([keys[:342], keys[414:422], keys[342:378], keys[422:430],keys[378:414],keys[430:438]])\n i = 0\n for k, v in weightData.items():\n if keys[i].find(\"num_batches_tracked\") != -1:\n i+=1\n myWeightData[keys[i]].copy_(v)\n i+=1\n yolo.eval()\n\n#==================================#\n# 初始化参数 \n#==================================#\ndef initialParam(m):\n if isinstance(m, nn.Conv2d):\n torch.nn.init.xavier_uniform_(m.weight)\n if m.bias is not None:\n torch.nn.init.constant_(m.bias, 0)\n elif isinstance(m, nn.BatchNorm2d):\n torch.nn.init.constant_(m.weight, 1)\n torch.nn.init.constant_(m.bias, 0)\n elif isinstance(m, nn.Linear):\n torch.nn.init.normal_(m.weight, std=1e-3)\n if m.bias is not None:\n torch.nn.init.constant_(m.bias, 0)"
},
{
"alpha_fraction": 0.5412920713424683,
"alphanum_fraction": 0.561363160610199,
"avg_line_length": 43.28703689575195,
"blob_id": "1bd82264108d7ceeb4e27dcf262bedc9ca877fd7",
"content_id": "81c2c8bf5126160e732988636e2838314fa38803",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 4821,
"license_type": "no_license",
"max_line_length": 136,
"num_lines": 108,
"path": "/train.py",
"repo_name": "woozi1122/pytorch-yolov",
"src_encoding": "UTF-8",
"text": "\nimport sys\nfrom numpy.core.fromnumeric import size\nsys.path.append(\"./util\")\nfrom torch import nn\nimport torch\nfrom util.const import CONST\nfrom torch.utils.data import DataLoader\nfrom torch import optim\nfrom tqdm import tqdm\nfrom util.dataload import YOLODataSet, yolo_dataset_collate\nfrom util.model import MyYOLO, getWeight, LastLayer, initialParam\nfrom util.utils import iouOne2One,iou,xywh2xyxy,xyxy2xywh\n#==================================#\n# 损失函数 \n#==================================#\ndef getLoss(yoloOut, yolo,bboxes):\n BCELoss = nn.BCELoss()\n MSELoss = nn.MSELoss()\n bboxes = torch.cat([bboxes, torch.zeros(bboxes.shape[0],1,device=CONST.device)], 1)\n anchorRelate = torch.tensor(CONST.anchor, device=CONST.device).view(-1,2) / 416\n anchorRelate = torch.cat([torch.zeros_like(anchorRelate), anchorRelate], 1)\n boxesWH = torch.cat([torch.zeros_like(bboxes[:,4:6]), bboxes[:,4:6]], 1)\n for i,item in enumerate(boxesWH):\n bboxes[i][6] = torch.argmax(iou(item, anchorRelate)) # [bs, cls, x,y,w,h,an]\n # print(bboxes)\n loss = 0\n for l,output in enumerate(yoloOut):\n lastLayer = yolo.lastLayers[l]\n ba,c,h,w = output.shape\n output = output.view(ba,len(lastLayer.anchor),-1,h,w).permute(0,1,3,4,2).contiguous()\n b, cls, boxesScaled, an, i, j = buildTarget(bboxes, lastLayer, l)\n tConf = torch.zeros_like(output[..., 4], device=CONST.device)\n xLoss,yLoss,wLoss,hLoss,clsLoss = [0,0,0,0,0]\n if b.shape[0] != 0:\n pr = output[b, an, i, j] # type:torch.Tensor\n tConf[b, an, i, j] = 1\n pr[:,:2] = pr[:,:2].sigmoid()\n xLoss = BCELoss(pr[..., 0], boxesScaled[...,0])\n yLoss = BCELoss(pr[..., 1], boxesScaled[...,1])\n wLoss = MSELoss(pr[..., 2], boxesScaled[...,2]) * 0.5\n hLoss = MSELoss(pr[..., 3], boxesScaled[...,3]) * 0.5\n clsLoss = BCELoss(pr[:,5:].sigmoid(), cls)\n confLoss = BCELoss(output[..., 4].sigmoid(),tConf)\n loss = loss + xLoss + yLoss + wLoss + hLoss + clsLoss + confLoss\n return loss\n\n#==================================#\n# 返回这一层的目标框 \n#==================================#\ndef buildTarget(bboxes:torch.Tensor, lastLayer:LastLayer, l):\n corrBox = []\n h,w = lastLayer.shape\n for item in bboxes:\n if item[-1] in CONST.anchorIndex[l]:\n item[-1] = CONST.anchorIndex[l].index(item[-1])\n corrBox.append(item.view(1,-1))\n corrBox = torch.cat(corrBox) if len(corrBox) else torch.Tensor(size=(0,7)).to(CONST.device)\n b = corrBox[:,0].long()\n cl = corrBox[:, 1].long()\n cls = torch.zeros((cl.shape[0], CONST.classNumber), device=CONST.device)\n cls[torch.arange(cl.shape[0]), cl] = 1\n an = corrBox[:,-1].long()\n boxesScaled = corrBox[:,2:6] * torch.tensor([w,h,w,h], device=CONST.device)\n ij = boxesScaled[:,:2].long()\n boxesScaled[:,:2] = boxesScaled[:,:2] - ij\n i = ij[:, 0]\n j = ij[:, 1]\n boxesScaled[:,2:4] = torch.log(boxesScaled[:,2:4] / torch.tensor([w,h], device=CONST.device).view(1,2))\n return b, cls, boxesScaled, an, i, j\n\n#==================================#\n# 训练 \n#==================================#\ndef train():\n yolo = MyYOLO() #type: nn.Module\n yolo.apply(initialParam) # 迁移学习\n getWeight(yolo)\n yolo.train()\n yolo.to(CONST.device)\n trainDataSet = YOLODataSet(train=True, type=\"coco\")\n trainDataLoader = DataLoader(trainDataSet, batch_size=CONST.batchSize, num_workers=CONST.num_workers, shuffle=True, pin_memory=True,\n drop_last=True,collate_fn=yolo_dataset_collate)\n optimizer = optim.Adam(yolo.parameters(), 5e-4, weight_decay = 5e-4)\n lr_scheduler = optim.lr_scheduler.StepLR(optimizer, step_size=1, gamma=0.94)\n for epoch in range(CONST.epochs):\n with tqdm(total=len(trainDataLoader),postfix=dict,mininterval=0.3) as pbar:\n pbar.set_description(f'train Epoch {epoch + 1}/{CONST.epochs}')\n s = 0\n for imgs, bboxes in trainDataLoader:\n imgs = imgs.to(CONST.device)\n bboxes = bboxes.to(CONST.device)\n optimizer.zero_grad()\n yoloOut = yolo(imgs)\n loss = getLoss(yoloOut, yolo,bboxes)\n loss.backward()\n optimizer.step()\n pbar.set_postfix(**{\"loss\":loss.item(), \"lr\": optimizer.param_groups[0]['lr']})\n pbar.update(1)\n s += 1\n if s == 1000:\n torch.save(yolo.state_dict(),\"weight.pth\")\n s = 0\n lr_scheduler.step()\n pbar.close()\n torch.save(yolo.state_dict(),\"weight.pth\")\n\nif __name__=='__main__':\n train()"
},
{
"alpha_fraction": 0.42499589920043945,
"alphanum_fraction": 0.5270233154296875,
"avg_line_length": 32.063289642333984,
"blob_id": "bc6e3ed8709367a6d0fc079144091aea4ce10378",
"content_id": "10fb7c008b5f2929ea190f0ed83a53fc1931e310",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Markdown",
"length_bytes": 21315,
"license_type": "no_license",
"max_line_length": 309,
"num_lines": 553,
"path": "/README.md",
"repo_name": "woozi1122/pytorch-yolov",
"src_encoding": "UTF-8",
"text": "# pytorch yolo复现\n\n想着入门pytorch,用pytorch复现一下yolo算法,其实yolo的原理一天左右就完全搞懂了,但是真正写起代码来,就是会有各种细节不太清除,我是先从吴恩达的视频开始,然后参考着两位大佬的复现代码[eriklindernoren的代码](https://github.com/eriklindernoren/PyTorch-YOLOv3)、[bubbliiiing的代码](https://github.com/bubbliiiing/yolo3-pytorch),可能是我对pytorch还不太熟悉,陆陆续续搞了一个星期才写完了自己的yolo复现,并在`coco2014`与`voc2007`的数据集进行了训练\n\n```python\n+---------------+--------+--------+--------+\n| class | AP | R | P |\n+---------------+--------+--------+--------+\n| person | 0.6289 | 0.6502 | 0.8299 |\n| bicycle | 0.4096 | 0.4228 | 0.8567 |\n| car | 0.4843 | 0.5158 | 0.7979 |\n| motorbike | 0.5602 | 0.5727 | 0.9049 |\n| aeroplane | 0.7055 | 0.7087 | 0.9697 |\n| bus | 0.7271 | 0.7331 | 0.9340 |\n| train | 0.8222 | 0.8252 | 0.9594 |\n| truck | 0.3978 | 0.4182 | 0.8164 |\n| boat | 0.3047 | 0.3243 | 0.8213 |\n| trafficlight | 0.4162 | 0.4519 | 0.7878 |\n| firehydrant | 0.7988 | 0.8015 | 0.9720 |\n| stopsign | 0.6801 | 0.6851 | 0.9073 |\n| parkingmeter | 0.4942 | 0.5098 | 0.8529 |\n| bench | 0.3180 | 0.3323 | 0.8272 |\n| bird | 0.4487 | 0.4760 | 0.7925 |\n| cat | 0.8485 | 0.8574 | 0.9347 |\n| dog | 0.7597 | 0.7741 | 0.8810 |\n| horse | 0.7333 | 0.7393 | 0.9143 |\n| sheep | 0.5716 | 0.6132 | 0.7643 |\n| cow | 0.5034 | 0.5231 | 0.8685 |\n| elephant | 0.7189 | 0.7246 | 0.9534 |\n| bear | 0.8966 | 0.9004 | 0.9369 |\n| zebra | 0.7496 | 0.7534 | 0.9537 |\n| giraffe | 0.7887 | 0.7912 | 0.9735 |\n| backpack | 0.2107 | 0.2352 | 0.7640 |\n| umbrella | 0.4713 | 0.4921 | 0.8343 |\n| handbag | 0.1493 | 0.1706 | 0.7516 |\n| tie | 0.5108 | 0.5313 | 0.8255 |\n| suitcase | 0.4338 | 0.4492 | 0.8811 |\n| frisbee | 0.8040 | 0.8086 | 0.9380 |\n| skis | 0.2839 | 0.3079 | 0.8311 |\n| snowboard | 0.4891 | 0.5164 | 0.8742 |\n| sportsball | 0.5358 | 0.5582 | 0.8161 |\n| kite | 0.3883 | 0.4260 | 0.7788 |\n| baseballbat | 0.5519 | 0.5705 | 0.8969 |\n| baseballglove | 0.5524 | 0.5630 | 0.8807 |\n| skateboard | 0.6872 | 0.6965 | 0.9247 |\n| surfboard | 0.5677 | 0.5810 | 0.8927 |\n| tennisracket | 0.7075 | 0.7134 | 0.9539 |\n| bottle | 0.3835 | 0.4163 | 0.7795 |\n| wineglass | 0.4638 | 0.4822 | 0.8452 |\n| cup | 0.4557 | 0.4780 | 0.8194 |\n| fork | 0.3253 | 0.3446 | 0.8127 |\n| knife | 0.2310 | 0.2621 | 0.7278 |\n| spoon | 0.1965 | 0.2206 | 0.7402 |\n| bowl | 0.4338 | 0.4689 | 0.7844 |\n| banana | 0.2772 | 0.2950 | 0.8096 |\n| apple | 0.2573 | 0.3176 | 0.6220 |\n| sandwich | 0.4757 | 0.5058 | 0.8436 |\n| orange | 0.2994 | 0.3518 | 0.6737 |\n| broccoli | 0.2527 | 0.2814 | 0.7939 |\n| carrot | 0.2074 | 0.2399 | 0.7585 |\n| hotdog | 0.4637 | 0.5010 | 0.8407 |\n| pizza | 0.5795 | 0.6067 | 0.9006 |\n| donut | 0.5242 | 0.5701 | 0.7702 |\n| cake | 0.5441 | 0.5637 | 0.8535 |\n| chair | 0.3562 | 0.3804 | 0.8133 |\n| sofa | 0.5047 | 0.5366 | 0.7821 |\n| pottedplant | 0.3533 | 0.3737 | 0.8468 |\n| bed | 0.6665 | 0.6828 | 0.8699 |\n| diningtable | 0.3767 | 0.4216 | 0.7326 |\n| toilet | 0.8128 | 0.8223 | 0.9495 |\n| tvmonitor | 0.7254 | 0.7375 | 0.9046 |\n| laptop | 0.6993 | 0.7116 | 0.9033 |\n| mouse | 0.6550 | 0.6651 | 0.8871 |\n| remote | 0.3896 | 0.4120 | 0.8294 |\n| keyboard | 0.5573 | 0.5623 | 0.9444 |\n| cellphone | 0.4309 | 0.4468 | 0.8360 |\n| microwave | 0.7174 | 0.7310 | 0.8874 |\n| oven | 0.4837 | 0.5055 | 0.8761 |\n| toaster | 0.1239 | 0.1282 | 0.8333 |\n| sink | 0.4782 | 0.5131 | 0.8443 |\n| refrigerator | 0.6729 | 0.6802 | 0.9165 |\n| book | 0.1305 | 0.1876 | 0.4934 |\n| clock | 0.6956 | 0.7022 | 0.9333 |\n| teddybear | 0.4943 | 0.5045 | 0.9273 |\n| hairdrier | 0.0270 | 0.0270 | 1.0000 |\n| MAP | 0.5001 | | |\n+---------------+--------+--------+--------+\n```\n\n# 使用方式\n\n**1.数据集预处理**\n\n先从网上下载coco数据集,在data文件夹里如下图所示进行放置。\n\n```\n─coco\n ├─images\n │ ├─train2014\n │ └─val2014\n ├─labels\n │ ├─train2014(空)\n │ └─val2014(空)\n └─instances_train2014.json\n └─instances_val2014.json\n```\n\n```\npython utils/handleData.py\n```\n\n生成对应的txt标签\n\n**训练/预测/验证**\n\n```\npython train.py/predict.py/valid.py\n```\n\n\n\n预训练权重使用[bubbliiiing大佬的](https://github.com/bubbliiiing/yolo3-pytorch)\n\n# 搭建YOLOv3模型\n\n其实yolov3的模型十分简单,看起来很吓唬人,其实就一个主干网络`darknet53`加卷积块`convolutional set`与简单卷积核和上采样时的特征合并,下面这张图很全面低反映了整个网络的细节,因为网上的很多图片并没有把具体的细节,如通道数与padding展示出来,而且在非主干网络上也没有标明输出,因此我在图上用蓝色字添加上了`通道数 输出大小`的信息,**padding**的话除了核为1*1的padding为0,其余的padding都为1,也就是说,只有步长为2的时候,才不是“same”的padding\n\n\n\n从图上可以看出,其实整个几个部分:**darknet53**、**convolutional set**、**最后的输出层**,**上采样层**,但是他们中很多部分都由**普通的卷积层(带激活与正则化)**组成。\n\n**1.普通的卷积层(带激活与正则化)**\n\n```python\n#==================================#\n# 简单的卷积层 \n#==================================#\nclass Conv(nn.Module):\n def __init__(self, inputC, outputC, keralSize, stride = 1, padding = \"same\") -> None:\n super(Conv, self).__init__()\n self.m = nn.Sequential(\n nn.Conv2d(inputC, outputC, keralSize, stride, padding, bias=False),\n nn.BatchNorm2d(outputC),\n nn.LeakyReLU(0.1)\n )\n def forward(self, x):\n return self.m(x)\n```\n\n卷积--->正则化---->激活\n\n**2.残差块**\n\n其实残差块也是由普通卷积层构成,只是有跳连接的部分。\n\n```python\n#==================================#\n# 残差块 \n#==================================#\nclass Residual(nn.Module):\n def __init__(self, inputC) -> None:\n super(Residual, self).__init__()\n tempC = inputC // 2\n self.m = nn.Sequential(\n Conv(inputC, tempC, 1, 1, 0),\n Conv(tempC, inputC, 3, 1, 1)\n )\n def forward(self, x):\n return x + self.m(x)\n```\n\n**3.convolutional set**\n\n按照上图中的结构,可以很容易写出convolutional set,也是由普通残差块组成而已\n\n```python\n#==================================#\n# convSet \n#==================================#\nclass convSet(nn.Module):\n def __init__(self, inputC, outputC, midC) -> None:\n super(convSet, self).__init__()\n self.m = nn.Sequential(\n Conv(inputC, outputC, 1),\n Conv(outputC, midC, 3),\n Conv(midC, outputC, 1),\n Conv(outputC, midC, 3),\n Conv(midC, outputC, 1),\n )\n def forward(self, x):\n return self.m(x)\n```\n\n**4.输出层**\n\n输出层有三个,分别输出13*13、26\\*26、52\\*52的图像,这里我添加了创建对应格子的网络grid的过程,后面会用到,对于模型来说没有影响\n\n```python\n#==================================#\n# lastLayer \n#==================================#\nclass LastLayer(nn.Module):\n def __init__(self, inputC, outputC, anchor=None) -> None:\n super(LastLayer, self).__init__()\n self.grid = None\n self.anchor = np.array(anchor)\n self.anchorScaled = []\n self.stride = 1\n self.shape = None\n self.m = nn.Sequential(\n Conv(inputC, inputC * 2, 3),\n nn.Conv2d(inputC * 2, outputC, 1)\n )\n def forward(self, x):\n o = self.m(x)\n if self.grid is None:\n self._createGrid(o.shape)\n return o\n def _createGrid(self, shape):\n b,c,h,w = shape\n self.shape = (h, w)\n self.stride = CONST.inputShape[0] / h\n self.anchorScaled = torch.tensor(self.anchor / self.stride, device=CONST.device)\n grid = torch.ones((b,len(self.anchor),h,w,4),device=CONST.device)\n gridY, gridX = torch.meshgrid(torch.arange(h), torch.arange(w), indexing=\"ij\")\n grid[...,0] *= gridX.to(CONST.device).unsqueeze(0)\n grid[...,1] *= gridY.to(CONST.device).unsqueeze(0)\n grid[...,2] *= self.anchorScaled[:,0].view(1,len(self.anchor),1,1)\n grid[...,3] *= self.anchorScaled[:,1].view(1,len(self.anchor),1,1)\n self.grid = grid\n```\n\n**5.主干的darknet53**\n\n```python\n#==================================#\n# darknet53 \n#==================================#\nclass Darknet53(nn.Module):\n def __init__(self) -> None:\n super(Darknet53, self).__init__()\n # 定义darknet53的层数\n self.layoutNumber = [1, 2, 8, 8, 4]\n self.layerA = nn.Sequential(\n Conv(3, 32, 3, 1, 1),\n self.MultiResidual(32, 64, 1),\n self.MultiResidual(64, 128, 2),\n self.MultiResidual(128, 256, 8)\n )\n self.layerB = self.MultiResidual(256, 512, 8)\n self.layerC = self.MultiResidual(512, 1024, 4)\n\n # 进行权值初始化\n for m in self.modules():\n if isinstance(m, nn.Conv2d):\n n = m.kernel_size[0] * m.kernel_size[1] * m.out_channels\n m.weight.data.normal_(0, math.sqrt(2. / n))\n elif isinstance(m, nn.BatchNorm2d):\n m.weight.data.fill_(1)\n m.bias.data.zero_()\n \n def forward(self, x):\n out1 = self.layerA(x)\n out2 = self.layerB(out1)\n out3 = self.layerC(out2)\n return out1, out2, out3\n\n # 多层的残差网络\n def MultiResidual(self, inputC, outputC, count):\n t = [Conv(inputC, outputC, 3, 2, 1) if i == 0 else Residual(outputC) for i in range(count + 1)]\n return nn.Sequential(*t)\n```\n\n**6.最终的yolo模型**\n\n将上面的几个部分和在一起,就是最终的yolo模型了\n\n```python\n#==================================#\n# 定义yolo模型 \n#==================================#\nclass MyYOLO(nn.Module):\n def __init__(self) -> None:\n super(MyYOLO, self).__init__()\n # 得到 1024*13*13\n self.darknet53 = Darknet53()\n # 得到 512*13*13\n self.convSet1 = convSet(1024, 512, 1024)\n # 得到 256*26*26, 但是后面要和另一层的输出合起来,得到的应该是 (512+256)*26*26\n self.layerA = nn.Sequential(\n Conv(512, 256, 1),\n nn.Upsample(scale_factor=2, mode='nearest')\n )\n # 得到 256*26*26\n self.convSet2 = convSet(256 + 512, 256, 512)\n \n # 得到 128*52*52, 但是后面要和另一层的输出合起来,得到的应该是 (128+256)*52*52\n self.layerB = nn.Sequential(\n Conv(256, 128, 1),\n nn.Upsample(scale_factor=2, mode='nearest')\n )\n # 得到 256*26*26\n self.convSet3 = convSet(128 + 256, 128, 256)\n\n # 得到 75*13*13\n self.predict1 = LastLayer(512, CONST.anchorNumber * (5 + CONST.classNumber), anchor=CONST.anchor[0])\n # 得到 75*26*26\n self.predict2 = LastLayer(256, CONST.anchorNumber * (5 + CONST.classNumber), anchor=CONST.anchor[1])\n # 得到 75*52*52\n self.predict3= LastLayer(128, CONST.anchorNumber * (5 + CONST.classNumber), anchor=CONST.anchor[2])\n self.lastLayers = [self.predict1, self.predict2, self.predict3]\n def forward(self, x):\n x1, x2, x3 = self.darknet53(x)\n convOut1 = self.convSet1(x3)\n out1 = self.predict1(convOut1)\n layerOut = self.layerA(convOut1)\n additon = torch.cat([layerOut, x2], 1)\n convOut2 = self.convSet2(additon)\n out2 = self.predict2(convOut2)\n layerOut = self.layerB(convOut2)\n additon = torch.cat([layerOut, x1], 1)\n convOut3 = self.convSet3(additon)\n out3 = self.predict3(convOut3)\n return out1, out2, out3\n```\n\n**到目前为止,整个yolo模型构建完成**\n\n模型输出的是一个长度为3的list。每个list的大小分别是\n\n* [batch_size,先验框数量,13,13,5+类别数]\n* [batch_size,先验框数量,26,26,5+类别数]\n* [batch_size,先验框数量,52,52,5+类别数]\n\n## 训练\n\n### 1.获取数据集\n\n使用pytorch自带的`dataset`与`dataloader`进行数据集的加载,先从网上下载coco数据集,在data文件夹里如下图所示进行放置。\n\n```\n─coco\n ├─images\n │ ├─train2014\n │ └─val2014\n └─instances_train2014.json\n └─instances_val2014.json\n```\n\n然后使用`handleData.py`来进行数据集的预处理,生成标签与对应的目标框,并将目标框变为[x,y,w,h],中心点与宽高模式。在label文件夹下生成对应的label。\n\n```\n16 0.6066874999999999 0.3413807531380753 0.54415625 0.51\n```\n\n### 2.数据增强\n\n利用`imgaug`模块进行数据的预处理与增强,包括将图片保持宽高比地缩放为(416,416)大小,位移,镜像等操作\n\n```python\n# 应用iaa的图像数据增强类\nclass imgAug():\n def __init__(self) -> None:\n self.argument = None\n def __call__(self, data):\n img, boxes = data\n bbs = []\n for item in boxes:\n bbs.append(BoundingBox(*item[1:], label=item[0]))\n bbs = BoundingBoxesOnImage(bbs, shape=img.shape)\n img, bbs = self.argument(image = img, bounding_boxes=bbs)\n bbs = bbs.clip_out_of_image()\n for i, item in enumerate(bbs):\n boxes[i,:] = np.array([item.label, item.x1, item.y1, item.x2, item.y2])\n return img, boxes\n# 训练集数据增强\nTRAIN_TRANSFORMS = transforms.Compose([\n AbsoluteLabel(),\n xywh2xyxy(),\n ImgUp(),\n CenterPlcae(),\n ReSize(),\n RelativeLabel(),\n xyxy2xywh(),\n ToTensor()\n])\n```\n\n\n\n### 3.损失函数\n\n损失函数实际上就是:期望yolo模型的输出与实际yolo模型输出的损失,分别计算输出的x/y/w/h/置信度/分类的交叉熵损失,合并,再反向传播即可。\n\n* 处理目标框,通过交并比得到究竟是哪个先验框负责预测这个目标框,将先验框的索引记录下来,将所有目标框处理成格式如下的二维数组\n * `[对应batch中第几张图,类别,x,y,w,h,先验框索引]`\n * 并附上对应的格子索引`i`与`j`\n* 通过目标框处理后的数据可以得到期望yolo模型的输出\n* 预测值与期望值做交叉熵损失\n\n```python\n#==================================#\n# 损失函数 \n#==================================#\ndef getLoss(yoloOut, yolo,bboxes):\n BCELoss = nn.BCELoss()\n MSELoss = nn.MSELoss()\n bboxes = torch.cat([bboxes, torch.zeros(bboxes.shape[0],1,device=CONST.device)], 1)\n anchorRelate = torch.tensor(CONST.anchor, device=CONST.device).view(-1,2) / 416\n anchorRelate = torch.cat([torch.zeros_like(anchorRelate), anchorRelate], 1)\n boxesWH = torch.cat([torch.zeros_like(bboxes[:,4:6]), bboxes[:,4:6]], 1)\n for i,item in enumerate(boxesWH):\n bboxes[i][6] = torch.argmax(iou(item, anchorRelate)) # [bs, cls, x,y,w,h,an]\n # print(bboxes)\n loss = 0\n for l,output in enumerate(yoloOut):\n lastLayer = yolo.lastLayers[l]\n ba,c,h,w = output.shape\n output = output.view(ba,len(lastLayer.anchor),-1,h,w).permute(0,1,3,4,2).contiguous()\n b, cls, boxesScaled, an, i, j = buildTarget(bboxes, lastLayer, l)\n tConf = torch.zeros_like(output[..., 4], device=CONST.device)\n xLoss,yLoss,wLoss,hLoss,clsLoss = [0,0,0,0,0]\n if b.shape[0] != 0:\n pr = output[b, an, i, j] # type:torch.Tensor\n tConf[b, an, i, j] = 1\n pr[:,:2] = pr[:,:2].sigmoid()\n xLoss = BCELoss(pr[..., 0], boxesScaled[...,0])\n yLoss = BCELoss(pr[..., 1], boxesScaled[...,1])\n wLoss = MSELoss(pr[..., 2], boxesScaled[...,2]) * 0.5\n hLoss = MSELoss(pr[..., 3], boxesScaled[...,3]) * 0.5\n clsLoss = BCELoss(pr[:,5:].sigmoid(), cls)\n confLoss = BCELoss(output[..., 4].sigmoid(),tConf)\n loss = loss + xLoss + yLoss + wLoss + hLoss + clsLoss + confLoss\n return loss\n```\n\n### 4.训练\n\n利用pytorch提供的optim进行训练即可\n\n## 预测\n\n* 读取图片,进行预处理成训练时一样的格式\n* 放入网络前向传播,将输出整理成(batch_size,10647,类别数+5)\n* 将所有输出的框与其先验框进行合并,整理为绝对值的框\n* 对整理后的框进行置信度的筛选后,进行非极大值抑制\n* 抑制后的结果既为预测结果,在原图上绘制\n\n### 非极大值抑制\n\n```python\n#==================================#\n# 一个框与多个框的交并比 \n#==================================#\ndef iou(box1: torch.Tensor, box2:torch.Tensor, isleftT2rightD = True) -> torch.Tensor:\n # box1 的shape为(1, 4), box2的shape为(None, 4)\n # 防止输入错误\n box1 = box1.view(-1,4)\n box2 = box2.view(-1,4)\n box1 = box1.repeat((box2.shape[0], 1))\n if not isleftT2rightD:\n box1 = torch.concat([box1[:,:2] - box1[:,2:4] / 2, box1[:,:2] + box1[:,2:4] / 2], 1).cuda()\n box2 = torch.concat([box2[:,:2] - box2[:,2:4] / 2, box2[:,:2] + box2[:,2:4] / 2], 1).cuda()\n # 交集左上角的点\n lu = torch.max(box1[:, :2], box2[:, :2])\n # 交集右下角的点\n rd = torch.min(box1[:, 2:], box2[:, 2:])\n rectsN = rd - lu\n rectsN[rectsN < 0] = 0#没有重叠区域设置为0\n rectsN = rectsN[:,0] * rectsN[:,1]\n rectsU = (box1[:,2] - box1[:,0]) * (box1[:,3] - box1[:,1]) + (box2[:,2] - box2[:,0]) * (box2[:,3] - box2[:,1])\n return rectsN / (rectsU - rectsN)\n#==================================#\n# 非极大值抑制 \n#==================================#\ndef nms(box: torch.Tensor = None, score: torch.Tensor = None,threshold: float = 0.3) -> None:\n _, sortIndex = score.sort(0, descending = True)\n res = []\n while sortIndex.size(0):\n if sortIndex.size(0) == 1:\n res.append(sortIndex[0].item())\n break\n res.append(sortIndex[0].item())\n ious = iou(box[sortIndex[0]], box[sortIndex[1:]])\n sortIndex = sortIndex[1:][ious < threshold]\n return res\n```\n\n## 验证\n\n对模型进行验证,使用的指标为`R`、`P`、`MP`\n\n* 先进行预测\n\n* 遍历所有框,将预测框与真实框的交并比大于阈值,并且类别正确的标记为1,其余标记为0,整理成list,并且以置信度进行排序\n* 对list梯形求和,总和除以总数为`p`,总和除以真实标签的总和为`R`,对多个`p`、`r`求面积为`map`\n\n```python\n#==================================#\n# 计算模型参数 \n#==================================#\ndef calMap(isCor, preConf, preLabels, targetLabels):\n sI = np.argsort(-preConf)\n isCor = isCor[sI]\n preConf = preConf[sI]\n preLabels = preLabels[sI]\n uClasses = np.unique(targetLabels)\n R = []\n P = []\n AP = []\n for oneCls in uClasses:\n sI = preLabels == oneCls\n isCorOneCls = isCor[sI]\n\n targetLabelsOneCls = targetLabels[targetLabels == oneCls]\n tarTrueC = targetLabelsOneCls.size # 目标框为该类的数量\n preTrueC = isCorOneCls.size # 预测框为该类的数量\n\n if preTrueC == 0:\n R.append(0)\n P.append(0)\n AP.append(0)\n continue\n tpC = isCorOneCls.cumsum()\n fpC = (1 - isCorOneCls).cumsum()\n\n r = tpC / tarTrueC\n p = tpC / (tpC + fpC)\n R.append(r[-1])\n P.append(p[-1])\n # 在前面添加是往前取矩形,在后面添加是让召回率可以达到1\n r = np.concatenate(([0.0], r, [1.0]))\n p = np.concatenate(([0.0], p, [0.0]))\n # 保证p单调递减\n for i in range(p.size - 1, 0, -1):\n p[i - 1] = max(p[i], p[i - 1])\n # 删除重复项\n i = np.where(r[1:] != r[:-1])[0]\n ap = np.sum((r[i+1] - r[i]) * p[i+1])\n AP.append(ap)\n return R,P,AP,uClasses\n \n#==================================#\n# show MP \n#==================================#\ndef showMap(R,P,AP,uClasses):\n res = [[\"class\",\"AP\", \"R\", \"P\"]]\n for i,_ in enumerate(uClasses):\n res.append([CONST.classes[int(uClasses[i])], \"%.4f\" % AP[i], \"%.4f\" % R[i], \"%.4f\" % P[i]])\n res.append([])\n res.append([\"MAP\", \"%.4f\" % np.average(AP)])\n print(AsciiTable(res).table)\n```\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n"
},
{
"alpha_fraction": 0.518619954586029,
"alphanum_fraction": 0.5651697516441345,
"avg_line_length": 72.08000183105469,
"blob_id": "0ef7ad94b53a37525db4124fab1d045a1e3923a7",
"content_id": "351229c7f15f91dd6e6b627b26301f570f7d6855",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 1862,
"license_type": "no_license",
"max_line_length": 793,
"num_lines": 25,
"path": "/util/const.py",
"repo_name": "woozi1122/pytorch-yolov",
"src_encoding": "UTF-8",
"text": "#==================================#\n# 定义常数 \n#==================================#\nclass MyCONST():\n inputShape = (416, 416) # 网络输入大小\n anchorNumber = 3 # anchor框的多少\n cocoPath = \"F:/c/deepLearn/learn/pytorch/myYOLO/data/coco\"\n vocPath = \"F:/c/deepLearn/learn/pytorch/myYOLO/data/VOCdevkit/VOC2007\"\n device = 'cuda'\n batchSize = 2\n num_workers = 0\n epochs = 10\n anchorThes = 4\n valIOUTher = 0.5\n anchor = [\n [[116,90], [156,198], [373,326]], # 13*13\n [[30,61], [62,45], [59,119]], # 26*26\n [[10,13], [16,30], [33,23]], # 52*52\n ]\n anchorIndex = [[0,1,2],[3,4,5],[6,7,8]]\n cocoClass = [\"person\",\"bicycle\",\"car\",\"motorbike\",\"aeroplane\",\"bus\",\"train\",\"truck\",\"boat\",\"trafficlight\",\"firehydrant\",\"stopsign\",\"parkingmeter\",\"bench\",\"bird\",\"cat\",\"dog\",\"horse\",\"sheep\",\"cow\",\"elephant\",\"bear\",\"zebra\",\"giraffe\",\"backpack\",\"umbrella\",\"handbag\",\"tie\",\"suitcase\",\"frisbee\",\"skis\",\"snowboard\",\"sportsball\",\"kite\",\"baseballbat\",\"baseballglove\",\"skateboard\",\"surfboard\",\"tennisracket\",\"bottle\",\"wineglass\",\"cup\",\"fork\",\"knife\",\"spoon\",\"bowl\",\"banana\",\"apple\",\"sandwich\",\"orange\",\"broccoli\",\"carrot\",\"hotdog\",\"pizza\",\"donut\",\"cake\",\"chair\",\"sofa\",\"pottedplant\",\"bed\",\"diningtable\",\"toilet\",\"tvmonitor\",\"laptop\",\"mouse\",\"remote\",\"keyboard\",\"cellphone\",\"microwave\",\"oven\",\"toaster\",\"sink\",\"refrigerator\",\"book\",\"clock\",\"vase\",\"scissors\",\"teddybear\",\"hairdrier\",\"toothbrush\"]\n vocClass = [\"aeroplane\",\"bicycle\",\"bird\",\"boat\",\"bottle\",\"bus\",\"car\",\"cat\",\"chair\",\"cow\",\"diningtable\",\"dog\",\"horse\",\"motorbike\",\"person\",\"pottedplant\",\"sheep\",\"sofa\",\"train\",\"tvmonitor\"]\n classes = cocoClass\n classNumber = len(classes) # 种类数量 voc 20 coco 80\nCONST = MyCONST()"
},
{
"alpha_fraction": 0.47598960995674133,
"alphanum_fraction": 0.4865347146987915,
"avg_line_length": 37.049381256103516,
"blob_id": "631d7bc2737376fdf592207f0a9f4f6e698ae387",
"content_id": "b1bb2d9092a981fa5e43fee0d965bcb7fa14eb03",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 6392,
"license_type": "no_license",
"max_line_length": 167,
"num_lines": 162,
"path": "/util/handleData.py",
"repo_name": "woozi1122/pytorch-yolov",
"src_encoding": "UTF-8",
"text": "import json\nimport numpy as np\nimport random\nimport xml.etree.ElementTree as ET\nfrom const import CONST\nimport json\nimport os\n#=========================================================\n# 处理原始coco数据集,生成txt文件\n#=========================================================\ndef handleCOCO():\n # 处理验证集\n res = {}\n with open(f'{CONST.cocoPath}/instances_val2014.json', 'r') as f:\n data = json.load(f)\n classes = []\n # 读取类型\n for item in data[\"categories\"]:\n classes.append(item[\"id\"])\n # 读取图片名字\n for item in data[\"images\"]:\n res[str(item[\"id\"])] = {}\n res[str(item[\"id\"])][\"imgName\"] = item[\"file_name\"]\n res[str(item[\"id\"])][\"w\"] = item[\"width\"]\n res[str(item[\"id\"])][\"h\"] = item[\"height\"]\n res[str(item[\"id\"])][\"bbox\"] = []\n # 读取bbox\n for item in data[\"annotations\"]:\n resItem = res[str(item[\"image_id\"])]\n t = [classes.index(int(item[\"category_id\"]))]\n bbox = np.array(item[\"bbox\"], dtype=\"float\")\n bbox[:2] += bbox[2:] / 2\n w = resItem[\"w\"]\n h = resItem[\"h\"]\n bbox = (bbox / np.array([w,h,w,h])).astype(float)\n t.extend(bbox.tolist())\n resItem[\"bbox\"].append(t)\n # 输出成txt文件\n valName = []\n for k,v in res.items():\n valName.append(v[\"imgName\"])\n with open(f'{CONST.cocoPath}/labels/val2014/{v[\"imgName\"]}.txt',\"w\") as f:\n bbox = v[\"bbox\"]\n bbox = [[str(b) for b in item] for item in bbox]\n bbox = [\" \".join(item) for item in bbox]\n bbox = '\\n'.join(bbox)\n f.write(bbox)\n valName.sort()\n with open(f'{CONST.cocoPath}/val2014.txt',\"w\") as f:\n f.write('\\n'.join(valName))\n # 处理训练集\n res = {}\n with open(f'{CONST.cocoPath}/instances_train2014.json', 'r') as f:\n data = json.load(f)\n classes = []\n # 读取类型\n for item in data[\"categories\"]:\n classes.append(item[\"id\"])\n # 读取图片名字\n for item in data[\"images\"]:\n res[str(item[\"id\"])] = {}\n res[str(item[\"id\"])][\"imgName\"] = item[\"file_name\"]\n res[str(item[\"id\"])][\"w\"] = item[\"width\"]\n res[str(item[\"id\"])][\"h\"] = item[\"height\"]\n res[str(item[\"id\"])][\"bbox\"] = []\n # 读取bbox\n for item in data[\"annotations\"]:\n resItem = res[str(item[\"image_id\"])]\n t = [classes.index(int(item[\"category_id\"]))]\n bbox = np.array(item[\"bbox\"], dtype=\"float\")\n bbox[:2] += bbox[2:] / 2\n w = resItem[\"w\"]\n h = resItem[\"h\"]\n bbox = (bbox / np.array([w,h,w,h])).astype(float)\n t.extend(bbox.tolist())\n resItem[\"bbox\"].append(t)\n # 输出成txt文件\n valName = []\n for k,v in res.items():\n valName.append(v[\"imgName\"])\n with open(f'{CONST.cocoPath}/labels/train2014/{v[\"imgName\"]}.txt',\"w\") as f:\n bbox = v[\"bbox\"]\n bbox = [[str(b) for b in item] for item in bbox]\n bbox = [\" \".join(item) for item in bbox]\n bbox = '\\n'.join(bbox)\n f.write(bbox)\n valName.sort()\n with open(f'{CONST.cocoPath}/train2014.txt',\"w\") as f:\n f.write('\\n'.join(valName))\n print(\"处理COCO完成\")\n\ntrain_test = 9 # 训练集与测试集比例\ntrain_val = 9 # 训练集与验证集比例\n#=========================================================\n# 处理原始coco数据集,生成txt文件\n#=========================================================\ndef getImgData(filePath):\n root=ET.parse(CONST.vocPath + \"/Annotations/\" + filePath).getroot()\n picInfo = {\n \"id\": filePath[:filePath.index(\".xml\")],\n \"boxes\": []\n }\n sizeBox = root.find('size')\n w = int(sizeBox.find(\"width\").text)\n h = int(sizeBox.find(\"height\").text)\n for obj in root.iter('object'):\n # 不使用difficult的数据\n if obj.find('difficult') is not None and int(obj.find('difficult').text) == 1:\n continue\n cls = obj.find('name').text # 类名\n if cls not in CONST.vocClass:\n continue\n clsType = CONST.vocClass.index(cls)\n xmlbox = obj.find('bndbox')\n box = [int(clsType),int(xmlbox.find(\"xmin\").text) / w, int(xmlbox.find(\"ymin\").text) / h, int(xmlbox.find(\"xmax\").text) / w, int(xmlbox.find(\"ymax\").text) / h]\n picInfo[\"boxes\"].append(box)\n \n return picInfo\ndef handleVoc():\n # 获取数据集数据\n random.seed(1)\n xmlFile = os.listdir(CONST.vocPath + \"/Annotations\")\n xmlFile = [x for x in xmlFile if x.find(\".xml\") != -1]\n imgDatas = []\n # 解析xml\n for item in xmlFile:\n imgDatas.append(getImgData(item))\n random.shuffle(imgDatas)\n test = imgDatas[:len(imgDatas) // train_test]\n imgDatas = imgDatas[len(imgDatas) // train_test:]\n val = imgDatas[: len(imgDatas) // train_val]\n train = imgDatas[len(imgDatas) // train_val:]\n\n fileNames = []\n for item in val:\n imgName = item[\"id\"] + \".jpg\"\n fileNames.append(imgName)\n boxes = np.array(item[\"boxes\"])\n # print(item)\n boxes[:,1:] = np.concatenate([(boxes[:,3:5] + boxes[:,1:3]) / 2, boxes[:,3:5] - boxes[:,1:3]], 1)\n np.savetxt(f'{CONST.vocPath}/labels/val/{imgName}.txt', boxes, fmt=\"%f\")\n fileNames.sort()\n with open(f'{CONST.vocPath}/val2007.txt',\"w\") as f:\n f.write('\\n'.join(fileNames))\n fileNames = []\n for item in train:\n imgName = item[\"id\"] + \".jpg\"\n fileNames.append(imgName)\n boxes = np.array(item[\"boxes\"])\n boxes[:,1:] = np.concatenate([(boxes[:,3:5] + boxes[:,1:3]) / 2, boxes[:,3:5] - boxes[:,1:3]], 1)\n np.savetxt(f'{CONST.vocPath}/labels/train/{imgName}.txt', boxes, fmt=\"%f\")\n fileNames.sort()\n with open(f'{CONST.vocPath}/train2007.txt',\"w\") as f:\n f.write('\\n'.join(fileNames))\n print(\"处理VOC完成\")\n\n#=========================================================\n# 主程序\n#========================================================= \nif __name__ == \"__main__\":\n handleCOCO()\n # handleVoc()\n"
},
{
"alpha_fraction": 0.5173847079277039,
"alphanum_fraction": 0.5415722131729126,
"avg_line_length": 35.76388931274414,
"blob_id": "d1006d9635405690fb6039d2e28650f185673987",
"content_id": "816562696daddaf9c92bbab169d03008f3cce310",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 2792,
"license_type": "no_license",
"max_line_length": 117,
"num_lines": 72,
"path": "/predict.py",
"repo_name": "woozi1122/pytorch-yolov",
"src_encoding": "UTF-8",
"text": "import sys\nsys.path.append(\"./util\")\nfrom imgaug.augmentables import bbs\nimport numpy as np\nfrom PIL import Image\nimport torch\nfrom util.const import CONST\nfrom imgaug.augmentables.bbs import BoundingBox\nfrom imgaug.augmentables.bbs import BoundingBoxesOnImage\nimport imgaug.augmenters as iaa\nfrom util.model import MyYOLO, getWeight\nfrom util.utils import handleBox,notMaxSuppression\n\n#==================================#\n# 原图坐标映射 \n#==================================#\ndef refer(input, imageSize, netInputSize = (416,416), isHold = False):\n if not isHold:\n for batchNum, val in enumerate(input):\n val[...,:4] = val[...,:4] * torch.Tensor(imageSize).repeat((1,2)).to(CONST.device)\n input[batchNum] = val\n return input\n\n#==================================#\n# 展示图片 \n#==================================#\ndef showPic(output, rawImg):\n output = output.to(\"cpu\")\n oLable = np.array(output[:,6], dtype=\"int32\")\n oBox = np.array(output[:,:4], dtype=\"int32\")\n oP = np.array(output[:,4] * output[:, 5], dtype=\"float\")\n boxes = []\n for i, box in enumerate(oBox):\n boxes.append(BoundingBox(*box.tolist(),label=CONST.classes[oLable[i]] + \" \" + str(int(oP[i] * 100) / 100)))\n rawImg = np.array(rawImg)\n bbs = BoundingBoxesOnImage(boxes, shape=rawImg.shape)\n Image.fromarray(bbs.draw_on_image(rawImg, size=4, alpha=0.9)).show()\n#==================================#\n# 预测 \n#==================================#\ndef predict(img, isShow=True):\n rawImg = Image.open(img).convert(\"RGB\")\n img = np.array(rawImg)\n img = iaa.Sequential([\n iaa.Resize((416,416))\n ])(image=img)\n img = np.transpose(img / 255, (2, 0, 1))\n with torch.no_grad():\n img = torch.from_numpy(img).unsqueeze(0).type(torch.float).to(CONST.device)\n yolo = MyYOLO().to(CONST.device) # type: torch.nn.Module\n # 迁移学习\n getWeight(yolo)\n # 预测图片\n output = yolo(img) # 返回的数据大小为[(batchSize, 255,13,13), (batchSize, 255,13,13)]\n output = handleBox(output, yolo) # 处理先验框 返回的数据大小为(batchSize, 10647, 85)\n output = notMaxSuppression(output) # 非极大值抑制\n\n print(f\"抑制后的结果:{output[0].shape}\")\n output = refer(output, rawImg.size) # 将图片映射到原图坐标\n output = output[0].to(\"cpu\")\n if len(output) == 0:\n print(\"没有找到特征\")\n exit()\n if isShow:\n showPic(output, rawImg)\n return output\n\n#==================================#\n# 主函数 \n#==================================#\nif __name__=='__main__':\n predict(\"./data/testImg/street.jpg\")"
},
{
"alpha_fraction": 0.5351518392562866,
"alphanum_fraction": 0.5533365607261658,
"avg_line_length": 34.19428634643555,
"blob_id": "761302e817e258e5c610c3c0a077bcfff1e250ed",
"content_id": "3d4b8962144f8d03f96d1e3e41d627fc04d2ebad",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 6445,
"license_type": "no_license",
"max_line_length": 133,
"num_lines": 175,
"path": "/util/dataload.py",
"repo_name": "woozi1122/pytorch-yolov",
"src_encoding": "UTF-8",
"text": "#--------------------------------------------------------\n# 加载数据集\n#--------------------------------------------------------\nimport numpy as np\nfrom PIL import Image\nimport torch\nfrom torch.utils.data import Dataset\nfrom torch.utils.data import DataLoader\nfrom const import *\nimport xml.etree.ElementTree as ET\nfrom PIL import Image\nfrom imgaug.augmentables.bbs import BoundingBox\nfrom imgaug.augmentables.bbs import BoundingBoxesOnImage\nimport imgaug.augmenters as iaa\nfrom PIL import Image\nimport numpy as np\nfrom torchvision import transforms\nimport warnings\n#==================================#\n# 读取数据集 \n#==================================#\nclass YOLODataSet(Dataset):\n def __init__(self, type=\"coco\", train=True) -> None:\n super(YOLODataSet, self).__init__()\n self.type = type\n if train:\n self.dataType = 'train'\n self.transform = TRAIN_TRANSFORMS\n else:\n self.dataType = 'val'\n self.transform = VAL_TRANSFORMS\n\n if type == \"coco\":\n with open(CONST.cocoPath + f'/{self.dataType}2014.txt', \"r\") as file:\n self.imgFiles = file.readlines()\n self.imgFiles =[ item.strip('\\n') for item in self.imgFiles]\n elif type == 'voc':\n with open(CONST.vocPath + f'/{self.dataType}2007.txt', \"r\") as file:\n self.imgFiles = file.readlines()\n self.imgFiles =[ item.strip('\\n') for item in self.imgFiles] \n\n def __len__(self):\n return len(self.imgFiles)\n \n def __getitem__(self, index):\n fileName = self.imgFiles[index]\n if self.type == 'coco':\n img = Image.open(f\"{CONST.cocoPath}/images/{self.dataType}2014/{fileName}\").convert(\"RGB\")\n with warnings.catch_warnings():\n warnings.simplefilter('ignore')\n boxes = np.loadtxt(f\"{CONST.cocoPath}/labels/{self.dataType}2014/{fileName}.txt\").reshape((-1, 5))\n elif self.type == 'voc':\n img = Image.open(f\"{CONST.vocPath}/JPEGImages/{fileName}\").convert(\"RGB\")\n with warnings.catch_warnings():\n warnings.simplefilter('ignore')\n boxes = np.loadtxt(f\"{CONST.vocPath}/labels/{self.dataType}/{fileName}.txt\").reshape((-1, 5))\n img = np.array(img)\n if boxes is None:\n boxes = []\n img, boxes = self.transform((img, boxes))\n # print(fileName)\n return img, boxes\n\n\n# 绝对值化标签\nclass AbsoluteLabel():\n def __call__(self, data):\n img, boxes = data\n h, w = img.shape[:2]\n boxes[:,1:] *= np.array([w,h,w,h])\n return img, boxes\n# 中点长宽框转换为左上角右下角框\nclass xywh2xyxy():\n def __call__(self, data):\n img, boxes = data\n boxes[:,1:] = np.concatenate(((boxes[:,1:3] - boxes[:,3:5] / 2), (boxes[:,1:3] + boxes[:,3:5] / 2)), axis=1)\n return img, boxes\n# 左上角右下角转换为框中点长宽框\nclass xyxy2xywh():\n def __call__(self, data):\n img, boxes = data\n boxes[:,1:] = np.concatenate((((boxes[:,1:3] + boxes[:,3:5]) / 2), (boxes[:,3:5] - boxes[:,1:3])), axis=1)\n return img, boxes\n# 相对化标签\nclass RelativeLabel():\n def __call__(self, data):\n img, boxes = data\n h, w = img.shape[:2]\n boxes[:,1:] /= np.array([w,h,w,h])\n return img, boxes\n# 应用iaa的图像数据增强类\nclass imgAug():\n def __init__(self) -> None:\n self.argument = None\n def __call__(self, data):\n img, boxes = data\n bbs = []\n for item in boxes:\n bbs.append(BoundingBox(*item[1:], label=item[0]))\n bbs = BoundingBoxesOnImage(bbs, shape=img.shape)\n img, bbs = self.argument(image = img, bounding_boxes=bbs)\n bbs = bbs.clip_out_of_image()\n for i, item in enumerate(bbs):\n boxes[i,:] = np.array([item.label, item.x1, item.y1, item.x2, item.y2])\n return img, boxes\n\n# 图片长宽比为1\nclass CenterPlcae(imgAug):\n def __init__(self) -> None:\n super(CenterPlcae, self).__init__()\n self.argument = iaa.Sequential([iaa.PadToAspectRatio(1.0,position=\"center-center\")])\n\n# 图像数据增强\nclass ImgUp(imgAug):\n def __init__(self) -> None:\n super(ImgUp, self).__init__()\n # 平移缩放、锐化、改变亮度、改变色调、翻转\n self.argument = iaa.Sometimes(0.7, iaa.Sequential([\n iaa.Sometimes(0.8, iaa.Affine(scale=(0.5, 1.2), translate_percent=(-0.2, 0.2))),\n iaa.Sometimes(0.5, iaa.Sharpen((0.0, 0.1))),\n iaa.Sometimes(0.5, iaa.AddToBrightness((-60, 40))),\n iaa.Sometimes(0.5, iaa.AddToHue((-10, 10))),\n iaa.Sometimes(0.3, iaa.Fliplr(1)),\n ]))\n# 改变尺寸\nclass ReSize(imgAug):\n def __init__(self) -> None:\n super(ReSize, self).__init__()\n # 平移缩放、锐化、改变亮度、改变色调、翻转\n self.argument = iaa.Resize((416, 416))\n# 转换为tensor\nclass ToTensor():\n def __call__(self, data):\n img, boxes = data\n img = np.transpose(img / 255, (2, 0, 1))\n img = torch.Tensor(img)\n boxes = torch.Tensor(boxes)\n return img, boxes\n\n#==================================#\n# 统一数据格式 \n#==================================#\ndef yolo_dataset_collate(data):\n images = None\n bboxes = None\n for i,(img, boxes) in enumerate(data):\n images = torch.cat([images, img.unsqueeze(0)]) if images is not None else img.unsqueeze(0)\n t = torch.cat([torch.ones((boxes.shape[0], 1)) * i , boxes], 1)\n bboxes = torch.cat([bboxes, t]) if bboxes is not None else t\n images = images\n return images, bboxes\n# 训练集数据增强\nTRAIN_TRANSFORMS = transforms.Compose([\n AbsoluteLabel(),\n xywh2xyxy(),\n ImgUp(),\n CenterPlcae(),\n ReSize(),\n RelativeLabel(),\n xyxy2xywh(),\n ToTensor()\n])\n# 验证集不需要数据增强\nVAL_TRANSFORMS = transforms.Compose([\n AbsoluteLabel(),\n xywh2xyxy(),\n CenterPlcae(),\n ReSize(),\n RelativeLabel(),\n xyxy2xywh(),\n ToTensor()\n])\ntrainDataSet = YOLODataSet(train=True)\ntrainDataLoader = DataLoader(trainDataSet, batch_size=CONST.batchSize, num_workers=CONST.num_workers, shuffle=False, pin_memory=True,\n drop_last=True,collate_fn=yolo_dataset_collate)\n"
},
{
"alpha_fraction": 0.4468686878681183,
"alphanum_fraction": 0.49272727966308594,
"avg_line_length": 42.05217361450195,
"blob_id": "8a8c5f3a1fef288ca10391a0b6eb5dd4a15a0182",
"content_id": "4bbb11f93f867f980781af3e4c326d505b2b6c5c",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 5304,
"license_type": "no_license",
"max_line_length": 115,
"num_lines": 115,
"path": "/util/utils.py",
"repo_name": "woozi1122/pytorch-yolov",
"src_encoding": "UTF-8",
"text": "import torch\nfrom const import CONST\n\n#==================================#\n# 处理先验证框,将先验框映射到与输入尺寸同样的大小 \n#==================================#\ndef handleBox(input, yolo):\n outputs = []\n lastLayers = yolo.lastLayers\n for i, lasterLayer in enumerate(lastLayers):\n b, c, h, w = input[i].shape\n res = input[i].view(b, len(lasterLayer.anchor), -1, h, w).permute(0, 1, 3, 4, 2).contiguous()\n res[...,[0,1,4]] = res[...,[0,1,4]].sigmoid()\n res[...,5:] = res[...,5:].sigmoid()\n res[...,:2] = res[...,:2] + lasterLayer.grid[..., :2]\n res[...,2:4] = torch.exp(res[...,2:4]) * lasterLayer.grid[..., 2:4]\n res[...,:4] = res[...,:4] / torch.Tensor([w,h,w,h]).to(CONST.device)\n res = res.view(b,-1,5+CONST.classNumber)\n outputs.append(res)\n return torch.cat(outputs,1)\n\n#==================================#\n# 一个框与多个框的交并比 \n#==================================#\ndef iou(box1: torch.Tensor, box2:torch.Tensor, isleftT2rightD = True) -> torch.Tensor:\n # box1 的shape为(1, 4), box2的shape为(None, 4)\n # 防止输入错误\n box1 = box1.view(-1,4)\n box2 = box2.view(-1,4)\n box1 = box1.repeat((box2.shape[0], 1))\n if not isleftT2rightD:\n box1 = torch.concat([box1[:,:2] - box1[:,2:4] / 2, box1[:,:2] + box1[:,2:4] / 2], 1).cuda()\n box2 = torch.concat([box2[:,:2] - box2[:,2:4] / 2, box2[:,:2] + box2[:,2:4] / 2], 1).cuda()\n # 交集左上角的点\n lu = torch.max(box1[:, :2], box2[:, :2])\n # 交集右下角的点\n rd = torch.min(box1[:, 2:], box2[:, 2:])\n rectsN = rd - lu\n rectsN[rectsN < 0] = 0#没有重叠区域设置为0\n rectsN = rectsN[:,0] * rectsN[:,1]\n rectsU = (box1[:,2] - box1[:,0]) * (box1[:,3] - box1[:,1]) + (box2[:,2] - box2[:,0]) * (box2[:,3] - box2[:,1])\n return rectsN / (rectsU - rectsN)\n#==================================#\n# 两组框的一一交并比 \n#==================================#\ndef iouOne2One(box1, box2, xyxy=False):\n box1 = box1.view(-1, 4)\n box2 = box2.view(-1, 4)\n if not xyxy:\n box1 = torch.concat([box1[:,:2] - box1[:,2:4] / 2, box1[:,:2] + box1[:,2:4] / 2], 1).to(CONST.device)\n box2 = torch.concat([box2[:,:2] - box2[:,2:4] / 2, box2[:,:2] + box2[:,2:4] / 2], 1).to(CONST.device)\n res = torch.zeros(box1.shape[0])\n for i in range(box1.shape[0]):\n res[i] = iou(box1[i], box2[i])\n return res\n#==================================#\n# 非极大值抑制 \n#==================================#\ndef nms(box: torch.Tensor = None, score: torch.Tensor = None,threshold: float = 0.3) -> None:\n _, sortIndex = score.sort(0, descending = True)\n res = []\n while sortIndex.size(0):\n if sortIndex.size(0) == 1:\n res.append(sortIndex[0].item())\n break\n res.append(sortIndex[0].item())\n ious = iou(box[sortIndex[0]], box[sortIndex[1:]])\n sortIndex = sortIndex[1:][ious < threshold]\n return res\n # 交并比\n\n#==================================#\n# 非极大值抑制 \n#==================================#\ndef notMaxSuppression(inputVal, confThres = 0.5):\n # 化为左上角+右下角坐标\n box_corner = inputVal.new(inputVal.shape)\n box_corner[:, :, 0] = inputVal[:, :, 0] - inputVal[:, :, 2] / 2\n box_corner[:, :, 1] = inputVal[:, :, 1] - inputVal[:, :, 3] / 2\n box_corner[:, :, 2] = inputVal[:, :, 0] + inputVal[:, :, 2] / 2\n box_corner[:, :, 3] = inputVal[:, :, 1] + inputVal[:, :, 3] / 2\n inputVal[:, :, :4] = box_corner[:, :, :4]\n output = [None for _ in inputVal]\n for i, prediction in enumerate(inputVal):\n # 置信度与对应的类型\n classP, classType = torch.max(prediction[:, 5:], 1, keepdim=True)\n # 利用置信度进行第一轮筛选\n confMask = (prediction[...,4] * classP[...,0] >= confThres)\n \n prediction = prediction[confMask]\n classP = classP[confMask]\n classType = classType[confMask]\n \n if not prediction.shape[0]:\n continue\n # 整合数据\n prediction = torch.cat([prediction[:,:5], classP, classType], 1)\n uniqueClass = prediction[:, -1].unique()\n # 对每一类分别进行非极大值抑制\n for uClassType in uniqueClass:\n tPrediction = prediction[prediction[:, -1] == uClassType]\n # if tPrediction.size(0) == 1:\n # continue\n res = nms(tPrediction[:,:4], tPrediction[:,4] * tPrediction[:,5], threshold=0.3)\n # res = torchvision.ops.nms(tPrediction[:,:4], tPrediction[:,4] * tPrediction[:,5], 0.3) 这是torch自带的的nms\n tPrediction = tPrediction[res]\n output[i] = tPrediction if output[i] is None else torch.cat([output[i], tPrediction])\n return output\n\n# 中点长宽框转换为左上角右下角框\ndef xywh2xyxy(boxes):\n return torch.cat(((boxes[:,0:2] - boxes[:,2:4] / 2), (boxes[:,0:2] + boxes[:,2:4] / 2)), axis=1)\n# 左上角右下角转换为框中点长宽框\ndef xyxy2xywh(boxes):\n return torch.cat((((boxes[:,0:2] + boxes[:,2:4]) / 2), (boxes[:,2:4] - boxes[:,0:2])), axis=1)"
}
] | 9 |
BoyangQiu/functions-grapher
|
https://github.com/BoyangQiu/functions-grapher
|
a135b664636d8e87361149adac400638019efbff
|
26570251143161d848e3f81fc2be275132faf54a
|
6f20826a010d5ecbdc6e734bfcc44c644ad1bcdd
|
refs/heads/master
| 2022-11-11T03:43:57.755884 | 2020-07-06T14:07:22 | 2020-07-06T14:07:22 | 234,192,068 | 0 | 0 | null | null | null | null | null |
[
{
"alpha_fraction": 0.5070810317993164,
"alphanum_fraction": 0.556648313999176,
"avg_line_length": 28.650602340698242,
"blob_id": "04afb69db68bf434270b02e56b87fd28da6d1fa7",
"content_id": "929e3789811e5b7218d8ec4cbeb709103ee7c712",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 2542,
"license_type": "no_license",
"max_line_length": 95,
"num_lines": 83,
"path": "/drawfunctions.py",
"repo_name": "BoyangQiu/functions-grapher",
"src_encoding": "UTF-8",
"text": "import matplotlib.pyplot as plt\r\nimport numpy as np\r\nimport tkinter as tk\r\nfrom tkinter import *\r\nimport math\r\n\r\nroot = Tk() \r\nroot.geometry('500x275')\r\nroot.title('Input Functions')\r\n# Spacer\r\nLabel(root, text = ' ').grid(row=0,column=0)\r\n\r\ne1 = Entry(root, width=45, borderwidth = 5)\r\nLabel(root, text = 'Function 1 : y =').grid(row=0,column=1)\r\ne1.grid(row=0, column=2, columnspan = 3, padx = 10, pady=20)\r\n\r\ne2 = Entry(root, width=45, borderwidth = 5)\r\nLabel(root, text = 'Function 2 : y =').grid(row=1,column=1)\r\ne2.grid(row=1, column=2, columnspan = 3, padx = 10, pady=20)\r\n\r\nstart = Entry(root, width=5, borderwidth = 5)\r\nLabel(root, text = 'Plot from (Default -10 to 10): ').grid(row=3,column=1)\r\nstart.insert(0, -10)\r\nstart.grid(row=3, column=2,padx = 10)\r\n\r\n\r\nLabel(root, text = 'to').place(relx = 0.65, rely = 0.53, anchor = CENTER)\r\n\r\nend = Entry(root, width=5, borderwidth = 5)\r\n#Label(root, text = ' To : ').grid(row=3,column=3)\r\nend.insert(0,'10')\r\nend.grid(row=3, column=3,padx = 10)\r\n\r\n\r\ndef plot_func():\r\n if start.get() == '':\r\n left = -10\r\n else:\r\n left = int(start.get())\r\n if end.get() == '':\r\n right = 10\r\n else:\r\n right = int(end.get())\r\n \r\n x = np.arange(left, right+0.2, 0.2)\r\n func1 = e1.get()\r\n func2 = e2.get()\r\n if 'sqrt' in (func1 or func2):\r\n func1 = func1.replace('sqrt', 'np.sqrt')\r\n func2 = func2.replace('sqrt', 'np.sqrt')\r\n f1 = eval(func1)\r\n f2 = eval(func2)\r\n plt.figure(figsize = (6,6))\r\n # plt.scatter(x, f2, color = 'r', marker = 'x')\r\n # plt.scatter(x, f1, color = 'b', marker = 'o')\r\n #Draws line connecting the points\r\n plt.plot(x, f1, color = 'b', label = f'y = {e1.get()}') \r\n plt.plot(x, f2, color = 'r', label = f'y = {e2.get()}')\r\n #Adds gridlines to the graph\r\n plt.grid()\r\n #Sets axes limits\r\n plt.xticks = (np.arange(min(x), max(x)))\r\n #Draws the x and y lines of origin\r\n plt.axhline(0, color = 'black', lw = 1.5)\r\n plt.axvline(0, color = 'black', lw = 1.5)\r\n\r\n #Add legend to the plot\r\n plt.legend(bbox_to_anchor=(1, 1.15))\r\n plt.show()\r\n \r\ndef _quit():\r\n root.quit() # stops mainloop\r\n root.destroy() \r\n \r\nenter = Button(root, text = 'Plot', padx = 50, pady = 10, command = plot_func)\r\nenter.grid(row = 4, column = 2, pady = 10)\r\n\r\nquit = enter = Button(root, text = 'Quit', padx = 50, pady = 10, command = _quit)\r\nquit.grid(row=4, column = 3, pady = 10)\r\n\r\n\r\n\r\nroot.mainloop()"
},
{
"alpha_fraction": 0.7377049326896667,
"alphanum_fraction": 0.7611241340637207,
"avg_line_length": 39.66666793823242,
"blob_id": "c520832bc22c19fd61f74fd856ff5c13eeca651e",
"content_id": "6ba7a335da991937befcec851e350c9e84cb7ffc",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Markdown",
"length_bytes": 854,
"license_type": "no_license",
"max_line_length": 138,
"num_lines": 21,
"path": "/README.md",
"repo_name": "BoyangQiu/functions-grapher",
"src_encoding": "UTF-8",
"text": "# Functions Graphing Calculator\n\nA simple GUI to allow for the user to input two functions of their choice plotted from a range of values.\nThis allows the user to compare and contrast function transformations.\n\n---\n\n**Drawfunctions.py**\n\n<i>Description:</i> A tool which allows for two functions to be graphed, allowing for the comparison between them and visualizing the\nrelative transformations between them. The functions are inputted as a function of x into the GUI and the range which they will be plotted\ncan also be specified (set to -10, 10 by default).\n\n<i>Note</i>: Functions MUST be inputted as a function of x and Pythonic mathematical operators must be used. Square roots can be inputted \nas 'sqrt'.\n\n<i>Examples</i>: \n\n<img src=\"https://puu.sh/FLqko/d2ce693fd6.png\" width=\"40%\">\n\n<img src=\"https://puu.sh/FLqnA/697b9738fc.png\" width=\"40%\">\n"
}
] | 2 |
cloudfiles-me/flask-redis
|
https://github.com/cloudfiles-me/flask-redis
|
d898159c44fd9f63a01e5145cc0d0c21d175dcc3
|
9c937345e016b3403548b110550e841e2b7079f2
|
b7889a6c93b7e36ac4bdb7790225e59319ee77b9
|
refs/heads/master
| 2021-09-16T02:45:35.318428 | 2018-06-15T01:15:48 | 2018-06-15T01:15:48 | null | 0 | 0 | null | null | null | null | null |
[
{
"alpha_fraction": 0.5998125672340393,
"alphanum_fraction": 0.6110590696334839,
"avg_line_length": 25.674999237060547,
"blob_id": "b5ba603335d227a25c4a29607263f2f48f6ce239",
"content_id": "2c0db750b5e1d0dd756e8e634445d27ce439fcf8",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 1067,
"license_type": "no_license",
"max_line_length": 106,
"num_lines": 40,
"path": "/app.py",
"repo_name": "cloudfiles-me/flask-redis",
"src_encoding": "UTF-8",
"text": "from flask import Flask\nfrom rediscluster import StrictRedisCluster\n\napp = Flask(__name__)\nstartup_nodes = [{\"host\": \"cache-cluster.j130cz.clustercfg.use1.cache.amazonaws.com\", \"port\": \"6379\"}]\ndb = StrictRedisCluster(startup_nodes=startup_nodes, decode_responses=True, skip_full_coverage_check=True)\n\[email protected]('/')\ndef hello_world():\n name = db.get(\"Name\")\n if (name != None):\n return 'Hello my dear %s' %name\n else:\n return 'Key not ready'\n\[email protected]('/listnames')\ndef list():\n nameslen = db.llen(\"names\")\n message = ''\n if (nameslen > 0):\n #for i in range(0, nameslen):\n for i in reversed(range(0, nameslen)):\n name = db.lindex('names', i)\n message = message + \"%s \\n\" %name\n return message\n else:\n return 'No names in the system'\n\[email protected]('/addname/<name>')\ndef setname(name):\n db.lpush(\"names\", name.encode('utf-8'))\n return 'Name added'\n\[email protected]('/delname')\ndef delname():\n db.delete('Name')\n return 'Name deleted'\n\nif __name__ == '__main__':\n app.run()\n"
}
] | 1 |
wowoa123/kanji-to-kana
|
https://github.com/wowoa123/kanji-to-kana
|
00e0f3f9045e34306c84a7d1f9b656eb41f9b8a2
|
c1d9334c359b098863637f404f5b76dd0fcdaf1b
|
5f8119e2c9892c52cddd05f41b804f6542fb5d69
|
refs/heads/master
| 2020-03-27T06:24:06.271288 | 2018-09-05T07:05:47 | 2018-09-05T07:05:47 | 146,102,631 | 2 | 0 | null | null | null | null | null |
[
{
"alpha_fraction": 0.6074289679527283,
"alphanum_fraction": 0.626365602016449,
"avg_line_length": 21.631868362426758,
"blob_id": "e8ad82871743a7395415edaa03ac8452f0b6decd",
"content_id": "fb0ab2b5ef5d5bd819e6161ce04cafff0257e8d5",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 4399,
"license_type": "no_license",
"max_line_length": 86,
"num_lines": 182,
"path": "/code/kanji_to_kana.py",
"repo_name": "wowoa123/kanji-to-kana",
"src_encoding": "UTF-8",
"text": "# -*- coding:utf-8 -*-\n\nimport configparser\nimport subprocess\nimport tkinter.messagebox\nfrom multiprocessing import Process\n\nfrom pynput import keyboard\nfrom pynput.keyboard import Listener\n\nimport mecab\nfrom area_select import *\n\n# 创建tkinter主窗口\nroot = tkinter.Tk()\n# 指定主窗口位置与大小\nroot.geometry('300x100+400+300')\n# 不允许改变窗口大小\nroot.resizable(False, False)\n# 窗口标题\nroot.title(\"kanji to kana\")\n# 窗口图标\nroot.iconbitmap('favicon.ico')\nroot.attributes('-topmost', True)\n\nexist_p = False\np_pos = None\n\nconfig_name = 'config.ini'\n\n\ndef config_save(option, value):\n cf = configparser.ConfigParser()\n cf.read(config_name)\n cf.set('config', option, value)\n with open(config_name, 'r+') as f:\n cf.write(f)\n\n\ndef config_get(option):\n cf = configparser.ConfigParser()\n cf.read(config_name)\n return cf.get(\"config\", option)\n\n\ndef show_text(sentence):\n text = tkinter.Tk()\n text.overrideredirect(True)\n text.geometry('1000x150+400+50')\n w = tkinter.Label(text, text=sentence)\n w.pack()\n\n\ndef on_any_press(key):\n if key in keyboard.Key:\n return True\n else:\n c = key.char\n if (' ' < c < 'A') or ('Z' < c < 'a') or ('z' < c < 127):\n return True\n elif 'a' <= c.lower() <= 'z':\n return True\n return False\n\n\ndef on_press1(key):\n if on_any_press(key):\n config_save('hotkey', str(key))\n return False\n\n\ndef on_press2(key):\n if on_any_press(key):\n config_save('stop', str(key))\n return False\n\n\ndef on_press(key):\n pass\n\n\ndef on_release(key):\n pass\n\n\ndef hotkey_release(key):\n c = str(key)\n hotkey = config_get('hotkey')\n stop = config_get('stop')\n area = config_get('area')\n area = area[1:-1]\n area = area.split(',')\n if c == hotkey:\n result = mecab.to_kana(int(area[0]), int(area[2]), int(area[1]), int(area[3]))\n global exist_p\n global p_pos\n if not exist_p:\n p = subprocess.Popen(['pythonw', 'text_area.py', result])\n p_pos = p\n exist_p = True\n else:\n p = subprocess.Popen(['pythonw', 'text_area.py', result])\n p_pos.terminate()\n p_pos = p\n elif c == stop:\n return False\n\n\ndef buttonCaptureClick():\n # 最小化主窗口\n # root.state('icon')\n # sleep(0.2)\n\n filename = 'temp.png'\n im = ImageGrab.grab()\n im.save(filename)\n im.close()\n # 显示全屏幕截图\n w = MyCapture(root, filename)\n ButtonCapture.wait_window(w.top)\n\n # print(w.myleft,w.mybottom)\n # 截图结束,恢复主窗口,并删除临时的全屏幕截图文件\n # label.config(text='Hello')\n root.state('normal')\n os.remove(filename)\n\n config_save('area', str(w.pos_get()))\n\n return True\n\n\ndef buttonHotKeyClick():\n # 监听\n with Listener(on_press=on_press1, on_release=on_release) as listener:\n listener.join()\n # 提示\n tkinter.messagebox.showinfo('', config_get('hotkey') + '键已被设为默认键')\n\n return True\n\n\ndef buttonHookOnClick():\n with Listener(on_press=on_press, on_release=hotkey_release) as listener:\n listener.join()\n\n\ndef buttonHookOffClick():\n with Listener(on_press=on_press2, on_release=on_release) as listener:\n listener.join()\n # 提示\n tkinter.messagebox.showinfo('', config_get('stop') + '键已被设为停止键')\n\n return True\n\n\nif not os.path.exists(config_name):\n cf = configparser.ConfigParser()\n cf.add_section('config')\n cf.set(\"config\", \"area\", \" \")\n cf.set(\"config\", \"hotkey\", \" \")\n cf.set(\"config\", \"stop\", \" \")\n with open(config_name, 'w') as f:\n cf.write(f)\n\n# 截图区域按钮\nButtonCapture = tkinter.Button(root, text='截图区域选择', command=buttonCaptureClick)\nButtonCapture.place(x=35, y=10, width=100, height=30)\n\n# 截图快捷键设定按钮\nButtonHotKey = tkinter.Button(root, text='截图快捷键设定', command=buttonHotKeyClick)\nButtonHotKey.place(x=35, y=60, width=100, height=30)\n\n# 监听开始按钮\nButtonHookOn = tkinter.Button(root, text='开始转换', command=buttonHookOnClick)\nButtonHookOn.place(x=175, y=10, width=100, height=30)\n\n# 停止按钮\nButtonHookOff = tkinter.Button(root, text='停止快捷键设定', command=buttonHookOffClick)\nButtonHookOff.place(x=175, y=60, width=100, height=30)\n\nroot.mainloop()\n"
},
{
"alpha_fraction": 0.536697268486023,
"alphanum_fraction": 0.5756880640983582,
"avg_line_length": 23.27777862548828,
"blob_id": "989d402b45e9afc681c64dff0dd7596441a3ec8c",
"content_id": "fd0c5c1c76fc3032b51625a9160bd0d737306436",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 436,
"license_type": "no_license",
"max_line_length": 68,
"num_lines": 18,
"path": "/code/text_area.py",
"repo_name": "wowoa123/kanji-to-kana",
"src_encoding": "UTF-8",
"text": "import sys\nimport tkinter\n\n\ndef text(master, sentence):\n master.geometry('1000x150+400+50')\n master.overrideredirect(False)\n master.attributes('-topmost', True,\n '-alpha', 0.5,\n '-toolwindow', True)\n label = tkinter.Label(master, text=sentence, font=(\"Arial\", 25))\n label.pack()\n\n\nif __name__ == '__main__':\n root = tkinter.Tk()\n text(root, sys.argv[1])\n root.mainloop()"
},
{
"alpha_fraction": 0.5898520350456238,
"alphanum_fraction": 0.6088795065879822,
"avg_line_length": 20.0222225189209,
"blob_id": "5c9a805a811d818a110eff2a453611024e6b30de",
"content_id": "5ccd1a82d20a7cf5195c6ea8ccb6bf57767a3efc",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 1086,
"license_type": "no_license",
"max_line_length": 61,
"num_lines": 45,
"path": "/code/mecab.py",
"repo_name": "wowoa123/kanji-to-kana",
"src_encoding": "UTF-8",
"text": "# -*- coding: utf-8 -*-\n\nimport sys\nimport MeCab\nimport pytesseract\nfrom PIL import Image, ImageGrab\nimport os\n\n\ndef wbimage(png):\n img = Image.open(png)\n\n # 模式L”为灰色图像,它的每个像素用8个bit表示,0表示黑,255表示白,其他数字表示不同的灰度。\n Img = img.convert('L')\n Img.save(\"gray.png\")\n\n # 自定义灰度界限,大于这个值为黑色,小于这个值为白色\n threshold = 200\n\n table = []\n for i in range(256):\n if i < threshold:\n table.append(0)\n else:\n table.append(1)\n\n # 图片二值化\n photo = Img.point(table, '1')\n name = 'wb.png'\n photo.save(name)\n os.remove(png)\n os.remove('gray.png')\n return name\n\n\ndef to_kana(left, top, right, bottom):\n png = 'cache.png'\n image = ImageGrab.grab((left, top, right, bottom))\n image.save(png)\n png2 = wbimage(png)\n image = Image.open(png2)\n sentence = pytesseract.image_to_string(image, lang='jpn')\n os.remove(png2)\n mecab = MeCab.Tagger(\"-Oyomi\")\n return mecab.parse(sentence)\n"
},
{
"alpha_fraction": 0.5449122786521912,
"alphanum_fraction": 0.5491228103637695,
"avg_line_length": 32.51764678955078,
"blob_id": "6c58d7c4dd08b1b259e14fd3ac470520b203df38",
"content_id": "2b42f2bc46e51a40a23c71e558cd675adaecbed4",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 3216,
"license_type": "no_license",
"max_line_length": 123,
"num_lines": 85,
"path": "/code/area_select.py",
"repo_name": "wowoa123/kanji-to-kana",
"src_encoding": "UTF-8",
"text": "# -*- coding:utf-8 -*-\nimport tkinter\nimport tkinter.filedialog\nimport os\nfrom PIL import ImageGrab\nfrom time import sleep\nfrom tkinter import StringVar, IntVar\n\n#截图区域选择\nclass MyCapture:\n def __init__(self, root, png):\n # 变量X和Y用来记录鼠标左键按下的位置\n self.X = tkinter.IntVar(value=0)\n self.Y = tkinter.IntVar(value=0)\n\n self.selectPosition = None\n # 屏幕尺寸\n screenWidth = root.winfo_screenwidth()\n # print(screenWidth)\n screenHeight = root.winfo_screenheight()\n # print(screenHeight)\n # 创建顶级组件容器\n self.top = tkinter.Toplevel(root, width=screenWidth, height=screenHeight)\n # 不显示最大化、最小化按钮\n self.top.overrideredirect(True)\n self.canvas = tkinter.Canvas(self.top, bg='white', width=screenWidth, height=screenHeight)\n # 显示全屏截图,在全屏截图上进行区域截图\n self.image = tkinter.PhotoImage(file=png)\n self.canvas.create_image(screenWidth // 2, screenHeight // 2, image=self.image)\n\n # 鼠标左键按下的位置\n def onLeftButtonDown(event):\n self.X.set(event.x)\n self.Y.set(event.y)\n # 开始截图\n self.sel = True\n\n self.canvas.bind('<Button-1>', onLeftButtonDown)\n\n # 鼠标左键移动,显示选取的区域\n def onLeftButtonMove(event):\n if not self.sel:\n return\n global lastDraw\n try:\n # 删除刚画完的图形,要不然鼠标移动的时候是黑乎乎的一片矩形\n self.canvas.delete(lastDraw)\n except Exception as e:\n pass\n lastDraw = self.canvas.create_rectangle(self.X.get(), self.Y.get(), event.x, event.y, outline='black')\n\n self.canvas.bind('<B1-Motion>', onLeftButtonMove)\n\n # 获取鼠标左键抬起的位置,保存区域截图\n def onLeftButtonUp(event):\n self.sel = False\n try:\n self.canvas.delete(lastDraw)\n except Exception as e:\n pass\n sleep(0.1)\n # 考虑鼠标左键从右下方按下而从左上方抬起的截图\n myleft, myright = sorted([self.X.get(), event.x])\n mytop, mybottom = sorted([self.Y.get(), event.y])\n self.selectPosition = (myleft, myright, mytop, mybottom)\n # pic = ImageGrab.grab((left+1, top+1, right, bottom))\n #\n # #弹出保存截图对话框\n #\n # fileName = tkinter.filedialog.asksaveasfilename(title='保存截图', filetypes=[('JPG files', '*.jpg')])\n #\n # if fileName:\n #\n # pic.save(fileName+'.jpg')\n # 关闭当前窗口\n # print(left, ' ', top,' ',right,' ',bottom)\n\n self.top.destroy()\n\n self.canvas.bind('<ButtonRelease-1>', onLeftButtonUp)\n self.canvas.pack(fill=tkinter.BOTH, expand=tkinter.YES)\n # 开始截图\n\n def pos_get(self):\n return self.selectPosition\n\n"
},
{
"alpha_fraction": 0.7333333492279053,
"alphanum_fraction": 0.7333333492279053,
"avg_line_length": 15,
"blob_id": "5da099a7654d4bf942533ed2e00ae6e5e90dcbf1",
"content_id": "c9ac456c296327342833210687b40f358ef7caa6",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Markdown",
"length_bytes": 15,
"license_type": "no_license",
"max_line_length": 15,
"num_lines": 1,
"path": "/README.md",
"repo_name": "wowoa123/kanji-to-kana",
"src_encoding": "UTF-8",
"text": "# kanji-to-kana"
}
] | 5 |
Scottnan/Args
|
https://github.com/Scottnan/Args
|
6db29768ba1ea03935342117d5d9b09a60cf08f5
|
e510d9b54cbc7d3c244b0883b79a59522bc55ad5
|
d383936025d67dca031fef0a58d8e0282eddf124
|
refs/heads/master
| 2020-08-12T14:16:32.253030 | 2019-10-18T13:29:00 | 2019-10-18T13:29:00 | 214,781,926 | 1 | 0 | null | null | null | null | null |
[
{
"alpha_fraction": 0.5231379270553589,
"alphanum_fraction": 0.5253415703773499,
"avg_line_length": 28.102563858032227,
"blob_id": "671ceddbd74c97016b613df466bade49705def21",
"content_id": "3e76e1d965b0673a5f5e3f75c19db1f91ad4bb30",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 2269,
"license_type": "no_license",
"max_line_length": 83,
"num_lines": 78,
"path": "/main/Parser.py",
"repo_name": "Scottnan/Args",
"src_encoding": "UTF-8",
"text": "import re\nimport copy\n\n\nclass ArgsParser(object):\n def __init__(self, command):\n self._schema = None\n self._command = Command(command)\n\n @property\n def schema(self):\n return self._schema\n\n @schema.setter\n def schema(self, schema_as_text):\n if not isinstance(schema_as_text, str):\n raise TypeError(\"expected a string\")\n self._schema = Schema(schema_as_text)\n\n def _convert_str_to_arg_type(self, arg):\n try:\n if self._schema.get_type(arg) == 'bool':\n if self._command.get_value(arg) is None:\n return True\n else:\n return bool(self._command.get_value(arg))\n elif self._schema.get_type(arg) == 'int':\n return int(self._command.get_value(arg))\n else:\n return self._command.get_value(arg)\n except ValueError:\n print(\"please check type of %s\" % arg)\n\n def get_args(self, arg):\n res = self._convert_str_to_arg_type(arg)\n return res\n\n\nclass Command(object):\n def __init__(self, command_line):\n self.arg_dict = {}\n command_iter = iter(re.split(r\"\\s+\", command_line))\n try:\n while True:\n arg = next(command_iter)[1]\n previous, command = copy.deepcopy(command_iter), next(command_iter)\n if self._is_value(command):\n self.arg_dict[arg] = command\n else:\n command_iter = previous\n except StopIteration:\n pass\n\n def get_value(self, arg):\n return self.arg_dict.get(arg, None)\n\n @staticmethod\n def _is_value(command):\n if command[0] != '-':\n return True\n elif \"0\" <= command[1] <= \"9\":\n return True\n else:\n return False\n\n\nclass Schema(object):\n def __init__(self, schema_as_text):\n schema = schema_as_text.split(\" \")\n self.schema_dict = {}\n for i in schema:\n key, val = i.split(\":\")\n self.schema_dict[key] = val\n\n def get_type(self, arg):\n if arg not in self.schema_dict.keys():\n raise ValueError(\"unexcept argument %d\" % arg)\n return self.schema_dict[arg]"
},
{
"alpha_fraction": 0.6240963935852051,
"alphanum_fraction": 0.6240963935852051,
"avg_line_length": 26.66666603088379,
"blob_id": "ab00b7fcfcb6ef46589d89ee873547c6ff4f52dc",
"content_id": "bd1d38a3b0121f236c5925e273a2c3f8eef839be",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 415,
"license_type": "no_license",
"max_line_length": 54,
"num_lines": 15,
"path": "/tests/test_schema.py",
"repo_name": "Scottnan/Args",
"src_encoding": "UTF-8",
"text": "import unittest\nfrom main.Parser import Schema\n\n\nclass MyTestCase(unittest.TestCase):\n def test_set_schema(self):\n schema_as_text = \"l:bool p:int d:str\"\n schema = Schema(schema_as_text)\n self.assertEqual(\"bool\", schema.get_type(\"l\"))\n self.assertEqual(\"int\", schema.get_type(\"p\"))\n self.assertEqual(\"str\", schema.get_type(\"d\"))\n\n\nif __name__ == '__main__':\n unittest.main()\n"
},
{
"alpha_fraction": 0.593478262424469,
"alphanum_fraction": 0.613043487071991,
"avg_line_length": 34.38461685180664,
"blob_id": "55e803080a7e6e37fd5f59ed33a704cf20ff2ab1",
"content_id": "f6980732f3baac1dd5fe6de4dbaa60312f729f6f",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 920,
"license_type": "no_license",
"max_line_length": 61,
"num_lines": 26,
"path": "/tests/test_command.py",
"repo_name": "Scottnan/Args",
"src_encoding": "UTF-8",
"text": "import unittest\nfrom main.Parser import Command\n\n\nclass MyTestCase(unittest.TestCase):\n def test_has_value(self):\n command = Command(\"-l True -p 8080 -d /usr/logs\")\n self.assertEqual(\"True\", command.get_value(\"l\"))\n self.assertEqual(\"8080\", command.get_value(\"p\"))\n self.assertEqual(\"/usr/logs\", command.get_value(\"d\"))\n\n def test_no_value(self):\n command = Command(\"-l -p 8080 -d /usr/logs\")\n self.assertEqual(None, command.get_value(\"l\"))\n self.assertEqual(\"8080\", command.get_value(\"p\"))\n self.assertEqual(\"/usr/logs\", command.get_value(\"d\"))\n\n def test_has_negative_value(self):\n command = Command(\"-l -p -9 -d /usr/logs\")\n self.assertEqual(None, command.get_value(\"l\"))\n self.assertEqual(\"-9\", command.get_value(\"p\"))\n self.assertEqual(\"/usr/logs\", command.get_value(\"d\"))\n\n\nif __name__ == '__main__':\n unittest.main()\n"
},
{
"alpha_fraction": 0.6270871758460999,
"alphanum_fraction": 0.6419295072555542,
"avg_line_length": 28.94444465637207,
"blob_id": "1d537418249c525258d0dbe53fc19f08d682b85f",
"content_id": "e1bdfe8a8f6e37b34e7f2e230aea8197d9f3c278",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 539,
"license_type": "no_license",
"max_line_length": 69,
"num_lines": 18,
"path": "/tests/test_args.py",
"repo_name": "Scottnan/Args",
"src_encoding": "UTF-8",
"text": "import unittest\nfrom main.Parser import ArgsParser\n\n\nclass MyTestCase(unittest.TestCase):\n @classmethod\n def setUpClass(cls) -> None:\n cls.args_parser = ArgsParser(\"-l -p 8080 -d /usr/logs\")\n cls.args_parser.schema = \"l:bool p:int d:str\"\n\n def test_get_args_value(self):\n self.assertEqual(True, self.args_parser.get_args(\"l\"))\n self.assertEqual(8080, self.args_parser.get_args(\"p\"))\n self.assertEqual(\"/usr/logs\", self.args_parser.get_args(\"d\"))\n\n\nif __name__ == '__main__':\n unittest.main()\n"
}
] | 4 |
FloWPs/inc_dec
|
https://github.com/FloWPs/inc_dec
|
a959cf74092f00c1ecaeed65c65721bd562d61b2
|
b2f7d690c25c484de342277ec29166dadff9c122
|
d8afcf81347f63ab8414c826712e5b4bb3681cd6
|
refs/heads/master
| 2023-02-09T02:27:52.782579 | 2021-01-05T13:24:03 | 2021-01-05T13:24:03 | 274,506,771 | 1 | 0 | null | 2020-06-23T20:53:23 | 2020-06-23T21:36:06 | 2021-01-04T14:11:12 |
Python
|
[
{
"alpha_fraction": 0.78125,
"alphanum_fraction": 0.78125,
"avg_line_length": 15,
"blob_id": "61175f9d0dba21e755062c0a84f857309925b625",
"content_id": "5a01f1ec5bd4028d692782fcdcfe34d45a4b32e2",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Markdown",
"length_bytes": 32,
"license_type": "no_license",
"max_line_length": 21,
"num_lines": 2,
"path": "/README.md",
"repo_name": "FloWPs/inc_dec",
"src_encoding": "UTF-8",
"text": "# inc_dec\nSmall package example\n"
},
{
"alpha_fraction": 0.7099999785423279,
"alphanum_fraction": 0.7099999785423279,
"avg_line_length": 24,
"blob_id": "a7fa287dd493bf44160f1f1b0e2ddc146be1667f",
"content_id": "db4063e64d12a94d015506c45545af94b5fa7cdb",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 100,
"license_type": "no_license",
"max_line_length": 36,
"num_lines": 4,
"path": "/inc_dec/__init__.py",
"repo_name": "FloWPs/inc_dec",
"src_encoding": "UTF-8",
"text": "from .inc_dec import increment\nfrom .inc_dec import decrement\n\n__all__ = ['increment', 'decrement']\n"
},
{
"alpha_fraction": 0.6612021923065186,
"alphanum_fraction": 0.6830601096153259,
"avg_line_length": 15.636363983154297,
"blob_id": "5d00b892ef0e5dc54bce8c52f6525fa17d5f3b4c",
"content_id": "40db2cbc93d080afd9240002fbd93149c3e7e606",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 183,
"license_type": "no_license",
"max_line_length": 61,
"num_lines": 11,
"path": "/inc_dec/tests/test_pytest.py",
"repo_name": "FloWPs/inc_dec",
"src_encoding": "UTF-8",
"text": "import pytest\n\nfrom inc_dec import increment, decrement # The code to test\n\n\ndef test_increment():\n assert increment(3) == 4\n\n\ndef test_decrement():\n assert decrement(3) == 2\n"
}
] | 3 |
jcenmiami/COP3375-Assignment01
|
https://github.com/jcenmiami/COP3375-Assignment01
|
a4cccc925b609d3b41f2eeade5940e45f0563f23
|
7467ac710b868f187bcd97a175515b5c570119d5
|
330b9f35a01dbf78b1671967233a29a9c08e938b
|
refs/heads/master
| 2021-01-11T21:11:47.853222 | 2017-01-17T20:17:29 | 2017-01-17T20:17:29 | 79,267,492 | 0 | 0 | null | null | null | null | null |
[
{
"alpha_fraction": 0.7669441103935242,
"alphanum_fraction": 0.7782402038574219,
"avg_line_length": 97.88235473632812,
"blob_id": "a1166bae126b57f07d5e640d89c97173584cd147",
"content_id": "0ebe99860b52b7a83d07904bdb294ca174064a58",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Markdown",
"length_bytes": 1682,
"license_type": "no_license",
"max_line_length": 160,
"num_lines": 17,
"path": "/README.md",
"repo_name": "jcenmiami/COP3375-Assignment01",
"src_encoding": "UTF-8",
"text": "# COP3375-Assignment01\nIn this assignment, we will develop an algorithm to calculate the final grade for a class of 15 students. \nThere are 5 assignments (20 points each) as well a midterm and a final (100 points each). \nAnother 10 points are for in-class participation. \nAll the assignments and tests contribute to the final grade equally. \nAdditionally, the in-class participation (either thru Discussions or in real-time) is also a part of the final grade. \nThis is a fairly simple assignment and we will use it to get into the habit of following good design and coding practices. \nFor brief examples of good documentation review some of the material at: \nhttp://noeticforce.com/best-free-tutorials-to-learn-python-pdfs-ebooks-online-interactive . (Links to an external site.) \nFirst, we analyze the problem (perhaps break it into a couple of pieces), develop a flow-chart or pseudocode. \nInclude the pseudocode in your program as comments. (5 points) \nNext, we code the algorithm in Python along with comments and documentation, and make sure it runs successfully. \nFor the code, we define a MAIN program (or driver) as well as a few functions and methods (e.g. get_grades, cumulate_total_points, determine_final_grade, ....) \nThese should be invoked by the driver via function calls and with arguments passed back and forth. (13 points) \nIn the end we display all the students net-ID and their final grade in a table format. \nSince this is an interactive program, make sure we display meaningful headings and instructions/prompts to the end-user. \nAlso please up load the .py file for me to execute. Note: the .py file should be uploaded - not your executed code. \n"
},
{
"alpha_fraction": 0.6165925860404968,
"alphanum_fraction": 0.632888913154602,
"avg_line_length": 32.093135833740234,
"blob_id": "2eec56ef62368ca6e75273f2b21b8dbd9350334c",
"content_id": "80d17d6e8249cff85fe55d79cd1ba15fb1d42f6f",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 6750,
"license_type": "no_license",
"max_line_length": 133,
"num_lines": 204,
"path": "/com/ezitapps/COP3375-Assignment01.py",
"repo_name": "jcenmiami/COP3375-Assignment01",
"src_encoding": "UTF-8",
"text": "'''\nCreated on Jan 16, 2017\n\n@author: juanramirez\n'''\nimport string\nfrom random import randint\nfrom builtins import int\n\n### This code outline only processes for one student. To process for more we need to put this code in a loop.\n### To generate a report for the entire class, we need to do the following:\n### In the main body of the driver, set up a loop to process each student\n### - Determine the final grade, and capture the student name & ID, as well as the final grade\n### (i.e. pretty much pull in all the code given below).\n### - To print the table at the end, we need to capture student information as we process it.\n### The easiest way to do this is to keep appending it in individual lists \n### for student name, ID and final grade\n### - alternatively, we can also do this by storing this information in a list of tuples,\n### with one tuple per student,e.g.[(st1, id1, grade1), (st2, id2, grade2),....\n### Note: the latter will require additional formatting.\n###\n### When all the students have been processed, we exit the processing loop.\n### Now we will need to set up another simple loop to print the table.\n### We should print an appropriate header (outside of this loop) and then retrieve each line item from the lists (or list of tuples)\n\n\n\n\ndef add_grades(assignment):\n print(\"Calculates the total points for all 5 assignments\")\n # capture grades for all 5 assignments and compute total\n # add other print sttements as needed to capture tests and other scores\n # at end, return total score\n \n assignmentGrades = 0\n \n \n for assignmentValue in assignment:\n assignmentGrades = assignmentGrades + assignmentValue\n \n \n return assignmentGrades\n \n \n \ndef letterGrade(total_score):\n # calcualte and print total percetage\n # calculate the correponding letter grade (recall we did an example of this in class)\n finalGrade = round((total_score / 310)*100)\n \n print(str(finalGrade) + \"%\")\n \n if finalGrade >= 90 and finalGrade <= 100:\n return(\"A\")\n\n elif finalGrade >= 80 and finalGrade < 90:\n return(\"B\")\n\n elif finalGrade >= 70 and finalGrade < 80:\n return(\"C\")\n\n elif finalGrade >= 60 and finalGrade < 70:\n return(\"D\")\n\n else:\n return(\"F\")\n \n\n\n \n \n# To facilitate input of grade\n# Created a function that automatically inputs a grade. \ndef automateGrade(lowRange, highRange, amount):\n \n assignmentValue = []\n \n for interval in range(amount):\n assignmentValue.append(randint(lowRange,highRange)) \n \n return assignmentValue\n\n\n\n\ndef main(): \n print(\"Welcome to calculate your grade program\")\n \n addStudent = \"Y\"\n studentNumber = 0\n studentList = []\n \n ### Start of While Loop!!!\n while addStudent == \"Y\":\n print(\"Add a Student\")\n \n \n \n # Input Student ID and Student Name\n # Not Automated!!!\n studentID = input(\"Enter Student ID: \")\n studentName = input(\"Enter Student Name: \")\n # Create object of Student Class.\n studentList.append(Student(studentID, studentName))\n print(studentList[studentNumber].studentName + \" has been added\")\n \n \n \n # Automate the addition of random grades. \n \n # Automate the assignment grades\n studentList[studentNumber].assignment = automateGrade(15, 20, 5)\n print(\"Five Assignment Grades are:\" , studentList[studentNumber].assignment)\n \n # Automate grade assignment for Midterm, Final and Participation\n # Print the result to make sure it's ok. \n studentList[studentNumber].midterm = randint(80,101)\n print(\"Midterm Exam Grade is:\",studentList[studentNumber].midterm)\n studentList[studentNumber].final = randint(80,101)\n print(\"Final Exam Grade is:\",studentList[studentNumber].final)\n studentList[studentNumber].participation = randint(8,11)\n print(\"Participation Grade is:\",studentList[studentNumber].participation)\n \n # Add all Assignments and save it to the student object.\n internalAssignmentSum = add_grades(studentList[studentNumber].assignment)\n studentList[studentNumber].assignmentSum = internalAssignmentSum\n #print(studentList[studentNumber].assignmentSum, internalAssignmentSum)\n studentList[studentNumber].totalGradeSum()\n \n # Assign a letter grade to the object. \n studentList[studentNumber].letterGrade = letterGrade(studentList[studentNumber].totalGrade)\n \n \n # Increment student amount\n studentNumber = studentNumber + 1\n \n # Request to see if you want to add another student. \n print(\"You have \" + str(studentNumber) + \" students in the list.\")\n addStudent = input(\"Do you want to add another student? Y or N: \").upper() \n \n #### END OF WHILE LOOP!!!\n \n #print(studentList.__len__())\n print(\"\\n\\nPrint Grade Results for Students:\")\n \n students = len(studentList)\n \n # Header for printed results\n formatTemplate = \"{0:15}{1:15}{2:15}{3:15}{4:15}{5:15}{6:15}{7:15}\"\n print(formatTemplate.format(\"ID\", \"Name\" , \"Assign.\", \"Midterm\" , \"Final\", \"Part.\", \n \"Total\", \"Grade\"))\n \n # Loop through all students and print the results. \n # Print the grades in a correct format.\n for student in range(students):\n studentList[student].printstudent()\n \n \n \n \n\n \n\n# Student Class where we have the values for an individual student.\n# Information to store for each student. \nclass Student:\n \n # Student Object Variables\n studentID = \"\"\n studentName = \"\"\n letterGrade = \"\"\n \n assignment = []\n assignmentSum = 0\n \n midterm = 0\n final = 0\n participation = 0\n \n totalGrade = 0\n \n # Format for output.\n formatTemplate = \"{0:15}{1:15}{2:5d}{3:15d}{4:14d}{5:14d}{6:16d}{7:>14}\"\n \n # Constructor\n def __init__(self, studentId, studentName):\n self.studentID = studentId\n self.studentName = studentName\n \n # Print the student information. Output\n def printstudent(self):\n #self.assignmentSum\n print (self.formatTemplate.format(self.studentID, self.studentName,self.assignmentSum,self.midterm,self.final, \n self.participation, self.totalGrade, self.letterGrade))\n \n # Add all scores. \n def totalGradeSum (self):\n self.totalGrade = self.assignmentSum + self.midterm + self.final + self.participation\n \n \n\n# Start Main.\nif __name__ == '__main__':\n main()"
}
] | 2 |
jesusjsc/weibo_login
|
https://github.com/jesusjsc/weibo_login
|
e53c758aa460e3d7299a3193e5f48692f84d86fa
|
040020ac85d427a350dff0d473997be0b66e251a
|
d5025245e6f267e3168a68a0edd52c858fc67be1
|
refs/heads/master
| 2021-01-01T02:20:52.714465 | 2020-01-07T12:50:24 | 2020-01-07T12:50:24 | 239,137,386 | 1 | 0 | null | 2020-02-08T13:34:44 | 2020-02-08T13:34:37 | 2020-01-07T12:50:25 | null |
[
{
"alpha_fraction": 0.5125501155853271,
"alphanum_fraction": 0.5309006571769714,
"avg_line_length": 31.92361068725586,
"blob_id": "c0163333e866937cc0f65e5abbd08c29175304ec",
"content_id": "e78be44223d64521ea337d2f407d001e6a0be6da",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 5195,
"license_type": "no_license",
"max_line_length": 142,
"num_lines": 144,
"path": "/weibo_login.py",
"repo_name": "jesusjsc/weibo_login",
"src_encoding": "UTF-8",
"text": "import re\nimport rsa\nimport json\nimport time\nimport base64\nimport binascii\nimport requests\n\n\nclass WeiboLogin:\n\n def __init__(self, username, password):\n self.session = requests.Session()\n self.headers = { # 伪装请求\n 'Referer': 'https://weibo.com/',\n 'User-Agent': 'Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/79.0.3945.88 Safari/537.36'\n }\n self.username = username\n self.password = password\n\n def get_username(self):\n \"\"\"\n 通过base64编码获取su的值\n \"\"\"\n username_base64 = base64.b64encode(self.username.encode())\n return username_base64.decode()\n\n def get_json_data(self, su):\n \"\"\"\n 通过su参数发起第一次请求,获取pubkey和nonce的值\n \"\"\"\n url = 'https://login.sina.com.cn/sso/prelogin.php'\n timestamp = int(time.time() * 1000)\n params = {\n 'entry': 'weibo',\n 'callback': 'sinaSSOController.preloginCallBack',\n 'su': su,\n 'rsakt': 'mod',\n 'checkpin': '1',\n 'client': 'ssologin.js(v1.4.19)',\n '_': timestamp\n }\n data = self.session.get(url=url, headers=self.headers, params=params).text\n json_data = json.loads(re.findall(r'\\((.*?)\\)', data, re.S)[0])\n return json_data\n\n def get_password(self, servertime, nonce, pubkey):\n \"\"\"\n 对密码进行rsa加密\n \"\"\"\n stri = (str(servertime)+'\\t'+str(nonce)+'\\n'+self.password).encode()\n public_key = rsa.PublicKey(int(pubkey, 16), int('10001', 16))\n password = rsa.encrypt(stri, public_key)\n password = binascii.b2a_hex(password)\n return password.decode()\n\n def login_first(self):\n \"\"\"\n 发起第一次登录请求,获取登录请求跳转页redirect_login_url\n \"\"\"\n su = self.get_username()\n json_data = self.get_json_data(su)\n sp = self.get_password(json_data['servertime'], json_data['nonce'], json_data['pubkey'])\n data = {\n 'entry': 'weibo',\n 'gateway': '1',\n 'from': '',\n 'savestate': '7',\n 'qrcode_flag': 'false',\n 'useticket': '1',\n 'pagerefer': '',\n 'vsnf': '1',\n 'su': su,\n 'service': 'miniblog',\n 'servertime': json_data['servertime'],\n 'nonce': json_data['nonce'],\n 'pwencode': 'rsa2',\n 'rsakv': json_data['rsakv'],\n 'sp': sp,\n 'sr': '2560*1440',\n 'encoding': 'UTF-8',\n 'prelt': '65',\n 'url': 'https://weibo.com/ajaxlogin.php?framelogin=1&callback=parent.sinaSSOController.feedBackUrlCallBack',\n 'returntype': 'META'\n }\n # 首次登录请求地址\n login_url = 'https://login.sina.com.cn/sso/login.php?client=ssologin.js(v1.4.19)'\n response = self.session.post(url=login_url, data=data, headers=self.headers)\n response.encoding = response.apparent_encoding\n try:\n redirect_login_url = re.findall(r'replace\\(\"(.*?)\"\\)', response.text, re.S)[0]\n return redirect_login_url\n except:\n return '获取首次登录请求跳转页失败'\n\n def login_second(self):\n \"\"\"\n 发起第二次登录请求,再次获取登录请求跳转页arrURL\n \"\"\"\n # 第二次登录请求地址\n url = self.login_first()\n response = self.session.get(url, headers=self.headers)\n response.encoding = response.apparent_encoding\n try:\n arr_url = json.loads(re.findall(r'setCrossDomainUrlList\\((.*?)\\)', response.text, re.S)[0])['arrURL'][0]\n return arr_url\n except:\n return '获取第二次登录请求跳转页失败'\n\n def login_finally(self):\n \"\"\"\n 发起最终登录请求,实现登录并跳转到用户微博首页\n \"\"\"\n # 最终登录请求地址\n url = self.login_second()\n try:\n res = self.session.get(url, headers=self.headers)\n res.encoding = res.apparent_encoding\n except:\n return '登录失败,或为用户名或密码错误'\n try:\n # 获取用户id\n uid = json.loads(res.text[1:-4])['userinfo']['uniqueid']\n # 拼接用户微博首页\n user_home_url = 'https://www.weibo.com/u/{}/home'.format(uid)\n # 访问用户微博首页\n response = self.session.get(url=user_home_url, headers=self.headers)\n response.encoding = response.apparent_encoding\n title = re.findall(r'<title>(.*?)</title>', response.text, re.S)[0]\n if '我的首页' in title:\n return '登录成功'\n else:\n return '登录失败'\n except:\n return '获取最终登录请求跳转页失败'\n\n\nif __name__ == '__main__':\n # 此处输入用户名和密码\n username = ''\n password = ''\n weibo = WeiboLogin(str(username), str(password))\n # 发起模拟登录\n print(weibo.login_finally())\n"
},
{
"alpha_fraction": 0.7037037014961243,
"alphanum_fraction": 0.7037037014961243,
"avg_line_length": 6.714285850524902,
"blob_id": "b16578eb6d41888f013318a43db2f9848a9fe683",
"content_id": "a1627b4539ac0ea334d778c49986bb4b59d84e81",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Markdown",
"length_bytes": 74,
"license_type": "no_license",
"max_line_length": 20,
"num_lines": 7,
"path": "/README.md",
"repo_name": "jesusjsc/weibo_login",
"src_encoding": "UTF-8",
"text": "# weibo_login\n实现微博模拟登录\n\n# 环境\n`\npip install requests\n`\n"
}
] | 2 |
spacer730/Neural-Networks
|
https://github.com/spacer730/Neural-Networks
|
69498b475a175c6052a55c0c5dc66918cb7bbde7
|
f755f9acf96c6de6e62a5749504c93b46e973693
|
c6855e7e2b829ca3fd89920070f54ea81d813319
|
refs/heads/master
| 2021-05-01T12:04:35.060312 | 2018-05-16T22:20:25 | 2018-05-16T22:20:25 | 121,056,487 | 0 | 0 | null | 2018-02-10T21:37:02 | 2018-02-10T21:37:02 | 2018-02-10T21:43:56 | null |
[
{
"alpha_fraction": 0.5384615659713745,
"alphanum_fraction": 0.610859751701355,
"avg_line_length": 19.090909957885742,
"blob_id": "6d67bbaa0125489f06b5a616562f7a96bfb8a743",
"content_id": "a56c2d17c4a3fda2c0dd49b3ecbcb2fc5ca598b3",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 442,
"license_type": "no_license",
"max_line_length": 46,
"num_lines": 22,
"path": "/Assignment 0/Assignment-0_1_1.py",
"repo_name": "spacer730/Neural-Networks",
"src_encoding": "UTF-8",
"text": "import numpy as np\nimport matplotlib.pyplot as plt\n\nx_1=[0,0,1,1]\nx_2=[0,1,0,1]\nX=np.matrix([x_1,x_2])\nAND=[0,0,0,1]\nXOR=[0,1,1,0]\n\nplt.figure(1)\nplt.plot(X[0],X[1], 'ro')\nplt.title('XOR function')\nfor i in np.arange(np.shape(X)[1]):\n\tplt.annotate(str(XOR[i]),xy=(x_1[i],x_2[i]))\n\nplt.figure(2)\nplt.plot(X[0],X[1], 'ro')\nplt.title('AND function')\nfor i in np.arange(np.shape(X)[1]):\n\tplt.annotate(str(AND[i]), xy=(x_1[i],x_2[i]))\n\nplt.show()\n"
},
{
"alpha_fraction": 0.7707253694534302,
"alphanum_fraction": 0.7836787700653076,
"avg_line_length": 24.733333587646484,
"blob_id": "0c41097cb085bc7ccc0ebfa1f8781bc3a91bdc54",
"content_id": "d0e771bb038f74d03e09d992dfeb02c58bd291ac",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 772,
"license_type": "no_license",
"max_line_length": 52,
"num_lines": 30,
"path": "/Assignment 2/test.py",
"repo_name": "spacer730/Neural-Networks",
"src_encoding": "UTF-8",
"text": "'''Trains a simple deep NN on the MNIST dataset.\n\nGets to 98.40% test accuracy after 20 epochs\n(there is *a lot* of margin for parameter tuning).\n2 seconds per epoch on a K520 GPU.\n'''\n\nfrom __future__ import print_function\n\nimport keras\nfrom keras.datasets import mnist\nfrom keras.models import Sequential\nfrom keras.layers import Dense, Dropout\nfrom keras.optimizers import RMSprop\n\nimport itertools\nimport numpy as np\nimport matplotlib.pyplot as plt\n\nfrom sklearn import svm, datasets\nfrom sklearn.model_selection import train_test_split\nfrom sklearn.metrics import confusion_matrix\n\ndef permute_array(array, permutation_order):\n\tpermutation = np.zeros(len(array))\n\n\tfor i in range(len(permutation)):\n\t\tpermutation[i] = array[permutation_order[i]]\n\n\treturn permutation\n"
},
{
"alpha_fraction": 0.5349740982055664,
"alphanum_fraction": 0.6139896512031555,
"avg_line_length": 19.3157901763916,
"blob_id": "49ed14d4159ac694841e706de542e37a4353afa0",
"content_id": "a8ce448764fabf3385c52cc817e5ef713c8fa8f6",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 772,
"license_type": "no_license",
"max_line_length": 77,
"num_lines": 38,
"path": "/Assignment 0/Assignment-0_1_4.py",
"repo_name": "spacer730/Neural-Networks",
"src_encoding": "UTF-8",
"text": "import numpy as np\nimport matplotlib.pyplot as plt\n\nx_1=np.array([0,0,1,1])\nx_2=np.array([0,1,0,1])\n\ndef sigmoid(x):\n\treturn 1/(1+np.exp(-x))\n\ndef cutoff(x):\n\tfor i in np.arange(len(x)):\n\t\tif x[i]>=0.5:\n\t\t\tx[i]=1\n\t\telse:\n\t\t\tx[i]=0\n\treturn x\n\ndef model(x_1,x_2):\n\tw=np.random.randn(6,1)\n\tt_1=sigmoid(w[0,0]*x_1+w[1,0]*x_2)\n\tt_2=sigmoid(w[2,0]*x_1+w[3,0]*x_2)\n\ty=sigmoid(w[4,0]*t_1+w[5,0]*t_2)\n\ty_hat=cutoff(y)\n\treturn y_hat\n\nAND_counter=0\nXOR_counter=0\n\nfor i in np.arange(1000000):\n\ty=model(x_1,x_2)\n\tif (y==np.array([0.,1.,1.,0.])).all():\n\t\tXOR_counter+=1\n\n\tif (y==np.array([0.,0.,0.,1.])).all():\n\t\tAND_counter+=1\n\nprint('Generated XOR function '+str(XOR_counter)+' times out of one million')\nprint('Generated AND function '+str(AND_counter)+' times out of one million')\n"
},
{
"alpha_fraction": 0.6606361865997314,
"alphanum_fraction": 0.6833001971244812,
"avg_line_length": 32.75838851928711,
"blob_id": "5a17a913c4e24db881e4928830b97d441c1300a5",
"content_id": "2780ca5b99d01f5823dd0117172feaaffb36b79f",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 5030,
"license_type": "no_license",
"max_line_length": 137,
"num_lines": 149,
"path": "/Assignment 2/Task 1/Assignment-2_1_CNN.py",
"repo_name": "spacer730/Neural-Networks",
"src_encoding": "UTF-8",
"text": "'''Trains a simple deep NN on the MNIST dataset.\n\nGets to 98.40% test accuracy after 20 epochs\n(there is *a lot* of margin for parameter tuning).\n2 seconds per epoch on a K520 GPU.\n'''\n\nfrom __future__ import print_function\n\nimport keras\nfrom keras.datasets import mnist\nfrom keras.models import Sequential\nfrom keras.layers import Dense, Dropout, Flatten\nfrom keras.layers import Conv2D, MaxPooling2D\nfrom keras import backend as K\n\nimport itertools\nimport numpy as np\nimport matplotlib.pyplot as plt\n\nfrom sklearn import svm, datasets\nfrom sklearn.model_selection import train_test_split\nfrom sklearn.metrics import confusion_matrix\n\nbatch_size = 128\nnum_classes = 10\nepochs = 12\n\n# input image dimensions\nimg_rows, img_cols = 28, 28\n\n# the data, split between train and test sets\n(x_train, y_train), (x_test, y_test) = mnist.load_data()\n\nif K.image_data_format() == 'channels_first':\n x_train = x_train.reshape(x_train.shape[0], 1, img_rows, img_cols)\n x_test = x_test.reshape(x_test.shape[0], 1, img_rows, img_cols)\n input_shape = (1, img_rows, img_cols)\nelse:\n x_train = x_train.reshape(x_train.shape[0], img_rows, img_cols, 1)\n x_test = x_test.reshape(x_test.shape[0], img_rows, img_cols, 1)\n input_shape = (img_rows, img_cols, 1)\n\nx_train = x_train.astype('float32')\nx_test = x_test.astype('float32')\nx_train /= 255\nx_test /= 255\nprint('x_train shape:', x_train.shape)\nprint(x_train.shape[0], 'train samples')\nprint(x_test.shape[0], 'test samples')\n\n# convert class vectors to binary class matrices\ny_train = keras.utils.to_categorical(y_train, num_classes)\ny_test = keras.utils.to_categorical(y_test, num_classes)\n\nmodel = Sequential()\nmodel.add(Conv2D(32, kernel_size=(3, 3),\n activation='relu',\n input_shape=input_shape))\nmodel.add(Conv2D(64, (3, 3), activation='relu'))\nmodel.add(MaxPooling2D(pool_size=(2, 2)))\nmodel.add(Dropout(0.25))\nmodel.add(Flatten())\nmodel.add(Dense(128, activation='relu'))\nmodel.add(Dropout(0.5))\nmodel.add(Dense(num_classes, activation='softmax'))\n\n#Change loss to keras.losses.mse to use MSE error function\nmodel.compile(loss=keras.losses.mse,\n optimizer=keras.optimizers.Adadelta(),\n metrics=['accuracy'])\n\nmodel.fit(x_train, y_train,\n batch_size=batch_size,\n epochs=epochs,\n verbose=1,\n validation_data=(x_test, y_test))\nscore = model.evaluate(x_test, y_test, verbose=0)\nprint('Test loss:', score[0])\nprint('Test accuracy:', score[1])\n\ndef plot_confusion_matrix(cm, classes,\n normalize=False,\n title='Confusion matrix',\n cmap=plt.cm.Blues):\n \"\"\"\n This function prints and plots the confusion matrix.\n Normalization can be applied by setting `normalize=True`.\n \"\"\"\n if normalize:\n cm = cm.astype('float') / cm.sum(axis=1)[:, np.newaxis]\n print(\"Normalized confusion matrix\")\n else:\n print('Confusion matrix, without normalization')\n\n print(cm)\n\n plt.imshow(cm, interpolation='nearest', cmap=cmap)\n plt.title(title)\n plt.colorbar()\n tick_marks = np.arange(len(classes))\n plt.xticks(tick_marks, classes, rotation=45)\n plt.yticks(tick_marks, classes)\n\n fmt = '.2f' if normalize else 'd'\n thresh = cm.max() / 2.\n for i, j in itertools.product(range(cm.shape[0]), range(cm.shape[1])):\n plt.text(j, i, format(cm[i, j], fmt),\n horizontalalignment=\"center\",\n color=\"white\" if cm[i, j] > thresh else \"black\")\n\n plt.tight_layout()\n plt.ylabel('True label')\n plt.xlabel('Predicted label')\n\nclass_names = np.array([0,1,2,3,4,5,6,7,8,9])\ny_pred = model.predict_classes(x_test)\ny_test_cnf = [np.argmax(elt) for elt in y_test]\n\n# Compute confusion matrix\ncnf_matrix = confusion_matrix(y_test_cnf, y_pred)\n\ntotal = np.sum(cnf_matrix, axis=1)\ndifference = [total[i] - cnf_matrix[i,i] for i in range(10)]\npercentage_misclassified = 100*(difference/total)\n\nnumberone = np.argmax(percentage_misclassified)\n\npercentage_misclassified2 = 100*(difference/total)\npercentage_misclassified2[numberone] = 0 \nnumbertwo = np.argmax(percentage_misclassified2)\n\npercentage_misclassified3 = 100*(difference/total)\npercentage_misclassified3[numberone] = 0\npercentage_misclassified3[numbertwo] = 0\nnumberthree = np.argmax(percentage_misclassified3)\n\nprint('Top 3 misclassified digits are:')\nprint('The #1 most misclassified digit is: ' + str(numberone) + ' with a percentage of: ' +str(percentage_misclassified[numberone]) )\nprint('The #2 most misclassified digit is: ' + str(numbertwo) + ' with a percentage of: ' +str(percentage_misclassified[numbertwo]) )\nprint('The #3 most misclassified digit is: ' + str(numberthree) + ' with a percentage of: ' +str(percentage_misclassified[numberthree]) )\n\n# Plot non-normalized confusion matrix\nnp.set_printoptions(precision=2)\nplt.figure()\nplot_confusion_matrix(cnf_matrix, classes=class_names,\n title='Confusion matrix, without normalization')\n\nplt.show()\n"
},
{
"alpha_fraction": 0.6853018999099731,
"alphanum_fraction": 0.7086927890777588,
"avg_line_length": 36.67499923706055,
"blob_id": "68a3792be4380b97f055bed9284ae9064fb3f971",
"content_id": "a5a78275c702ae805380e301343f86b80b01b7c0",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 6028,
"license_type": "no_license",
"max_line_length": 143,
"num_lines": 160,
"path": "/Assignment 2/Task 1/Assignment-2_1_MLP_Random_Permutation.py",
"repo_name": "spacer730/Neural-Networks",
"src_encoding": "UTF-8",
"text": "'''Trains a simple deep NN on the MNIST dataset.\n\nGets to 98.40% test accuracy after 20 epochs\n(there is *a lot* of margin for parameter tuning).\n2 seconds per epoch on a K520 GPU.\n'''\n\nfrom __future__ import print_function\n\nimport keras\nfrom keras.datasets import mnist\nfrom keras.models import Sequential\nfrom keras.layers import Dense, Dropout\nfrom keras.optimizers import RMSprop\n\nimport itertools\nimport numpy as np\nimport matplotlib.pyplot as plt\n\nfrom sklearn import svm, datasets\nfrom sklearn.model_selection import train_test_split\nfrom sklearn.metrics import confusion_matrix\n\nbatch_size = 128\nnum_classes = 10\nepochs = 20\n\n#The data, split between train and test sets\n(x_train, y_train), (x_test, y_test) = mnist.load_data()\n\ndef permute_array(array, permutation_order):\n\tpermutation = np.zeros(len(array))\n\n\tfor i in range(len(permutation)):\n\t\tpermutation[i] = array[permutation_order[i]]\n\n\treturn permutation\n\npermutation_order=np.random.permutation(784)\n\nx_train = x_train.reshape(60000, 784)\nx_train_permuted = np.array([permute_array(x_train[i], permutation_order) for i in range(60000)])\n\nx_test = x_test.reshape(10000, 784)\nx_test_permuted = np.array([permute_array(x_test[i], permutation_order) for i in range(10000)])\n\nx_train_permuted = x_train_permuted.astype('float32')\nx_test_permuted = x_test_permuted.astype('float32')\nx_train_permuted /= 255\nx_test_permuted /= 255\nprint(x_train_permuted.shape[0], 'permuted train samples')\nprint(x_test_permuted.shape[0], 'permuted test samples')\n\n#Convert class vectors to binary class matrices\ny_train = keras.utils.to_categorical(y_train, num_classes)\ny_test = keras.utils.to_categorical(y_test, num_classes)\n\n#Sequential is a normal MLP type structure. Dense implents operation: output = activation(dot(input, kernel) + bias)\n#Input shape is 784, because 28x28 pixels for the hand written digits\n#Dropout consists in randomly setting a fraction rate of input units to 0 at each update during training time, which helps prevent overfitting.\nmodel = Sequential()\nmodel.add(Dense(512, activation='relu', input_shape=(784,)))\nmodel.add(Dropout(0.2))\nmodel.add(Dense(512, activation='relu'))\nmodel.add(Dropout(0.2))\nmodel.add(Dense(num_classes, activation='softmax'))\n\n#Summary prints a summary of your model\nmodel.summary()\n\n#Configure the learning process before training the model. Here we specify which loss function, optimizer and a list of metrics to use.\n#Change loss to 'mse' to use MSE error\nmodel.compile(loss='mse',\n optimizer=RMSprop(),\n metrics=['accuracy'])\n\n#Here we train the model. An epoch is one forward pass and one backward pass of all the training examples.\n#The batch size is the number of training examples in one forward/backward pass. The higher the batch size, the more memory space you'll need.\n#Example: 1000 training examples, batchsize is 500 then it takes 2 iterations for one epoch to complete.\n#Integer. 0, 1, or 2. Verbosity mode. 0 = silent, 1 = progress bar, 2 = one line per epoch. (How to see the training process for each epoch)\nhistory = model.fit(x_train_permuted, y_train,\n batch_size=batch_size,\n epochs=epochs,\n verbose=1,\n validation_data=(x_test_permuted, y_test))\n\n#Returns the loss value & metrics values for the model in test mode.\nscore = model.evaluate(x_test_permuted, y_test, verbose=0)\nprint('Test loss:', score[0])\nprint('Test accuracy:', score[1])\n\ndef plot_confusion_matrix(cm, classes,\n normalize=False,\n title='Confusion matrix',\n cmap=plt.cm.Blues):\n \"\"\"\n This function prints and plots the confusion matrix.\n Normalization can be applied by setting `normalize=True`.\n \"\"\"\n if normalize:\n cm = cm.astype('float') / cm.sum(axis=1)[:, np.newaxis]\n print(\"Normalized confusion matrix\")\n else:\n print('Confusion matrix, without normalization')\n\n print(cm)\n\n plt.imshow(cm, interpolation='nearest', cmap=cmap)\n plt.title(title)\n plt.colorbar()\n tick_marks = np.arange(len(classes))\n plt.xticks(tick_marks, classes, rotation=45)\n plt.yticks(tick_marks, classes)\n\n fmt = '.2f' if normalize else 'd'\n thresh = cm.max() / 2.\n for i, j in itertools.product(range(cm.shape[0]), range(cm.shape[1])):\n plt.text(j, i, format(cm[i, j], fmt),\n horizontalalignment=\"center\",\n color=\"white\" if cm[i, j] > thresh else \"black\")\n\n plt.tight_layout()\n plt.ylabel('True label')\n plt.xlabel('Predicted label')\n\nclass_names = np.array([0,1,2,3,4,5,6,7,8,9])\ny_pred = model.predict_classes(x_test_permuted)\ny_test_cnf = [np.argmax(elt) for elt in y_test]\n\n# Compute confusion matrix\ncnf_matrix = confusion_matrix(y_test_cnf, y_pred)\n\ntotal = np.sum(cnf_matrix, axis=1)\ndifference = [total[i] - cnf_matrix[i,i] for i in range(10)]\npercentage_misclassified = 100*(difference/total)\n\nnumberone = np.argmax(percentage_misclassified)\n\npercentage_misclassified2 = 100*(difference/total)\npercentage_misclassified2[numberone] = 0 \nnumbertwo = np.argmax(percentage_misclassified2)\n\npercentage_misclassified3 = 100*(difference/total)\npercentage_misclassified3[numberone] = 0\npercentage_misclassified3[numbertwo] = 0\nnumberthree = np.argmax(percentage_misclassified3)\n\nprint('')\nprint('Top 3 misclassified digits are:')\nprint('The #1 most misclassified digit is: ' + str(numberone) + ' with a percentage of: ' +str(percentage_misclassified[numberone]) )\nprint('The #2 most misclassified digit is: ' + str(numbertwo) + ' with a percentage of: ' +str(percentage_misclassified[numbertwo]) )\nprint('The #3 most misclassified digit is: ' + str(numberthree) + ' with a percentage of: ' +str(percentage_misclassified[numberthree]) )\n\n# Plot non-normalized confusion matrix\nnp.set_printoptions(precision=2)\nplt.figure()\nplot_confusion_matrix(cnf_matrix, classes=class_names,\n title='Confusion matrix, without normalization')\n\nplt.show()\n"
},
{
"alpha_fraction": 0.6047130227088928,
"alphanum_fraction": 0.6514633297920227,
"avg_line_length": 42.13114929199219,
"blob_id": "f9e0e51b39ac5999cbdb83bbdb8870eafbaaa673",
"content_id": "586c95c670fddb48734cd643fea7ae3706290cd9",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 2631,
"license_type": "no_license",
"max_line_length": 120,
"num_lines": 61,
"path": "/Assignment 0/Assignment-0_2_4.py",
"repo_name": "spacer730/Neural-Networks",
"src_encoding": "UTF-8",
"text": "import numpy as np\nfrom numpy.linalg import inv\nimport matplotlib.pyplot as plt\nimport matplotlib.patches as mpatches\n\nxaxis=np.linspace(0,1,100)\nx_train=np.array([np.zeros(9),np.zeros(15),np.zeros(100)])\nx_test=np.array([np.zeros(9),np.zeros(15),np.zeros(100)])\ny_train=np.array([np.zeros(9),np.zeros(15),np.zeros(100)])\ny_test=np.array([np.zeros(9),np.zeros(15),np.zeros(100)])\nn=[9,15,100]\npoly=[] #Here we will store the coefficients of the polynomials\n\nfor i in np.arange(3):\n\tx_train[i]=np.random.uniform(0,1,n[i])\n\ty_train[i]=np.random.normal(0,0.05,n[i])+0.5+0.4*np.sin(2*np.pi*x_train[i])\n\tx_test[i]=np.random.uniform(0,1,n[i])\n\ty_test[i]=np.random.normal(0,0.05,n[i])+0.5+0.4*np.sin(2*np.pi*x_test[i])\n\tfor d in np.arange(10):\n\t\tpoly.append(np.zeros((3,d+1)))\n\t\t#A=[[x_train[0]**0,...,x_train[0]**d],...,[x_train[n[i]]**0]]\n\t\t#poly[d][i]=inv(A)*x\n\n\t\tpoly[d][i]=np.polyfit(x_train[i],y_train[i],d)\n\t\tp=np.poly1d(poly[d][i])\n\t\tplt.figure(10*i+d+1)\n\t\tplt.plot(x_train[i],y_train[i],'ro')\n\t\tplt.plot(x_test[i],y_test[i],'bo')\n\t\tplt.legend(handles=[mpatches.Patch(color='red', label='Training set'),mpatches.Patch(color='blue', label='Test set')])\n\t\tplt.plot(xaxis,p(xaxis))\n\t\tplt.ylim(0,1.2)\n\t\tplt.title('Polynomial fit of degree '+str(d))\n\t\tplt.savefig('Pol. fit degree '+str(d)+' for '+str(n[i])+' datapoints.png')\n\nerror_train=np.array([np.zeros(10),np.zeros(10),np.zeros(10)])\nerror_test=np.array([np.zeros(10),np.zeros(10),np.zeros(10)])\nreg_error=np.array([np.zeros(10),np.zeros(10),np.zeros(10)])\nMSE_train=np.array([np.zeros(10),np.zeros(10),np.zeros(10)])\nMSE_test=np.array([np.zeros(10),np.zeros(10),np.zeros(10)])\n#MSE_REG=np.array([np.zeros(10),np.zeros(10),np.zeros(10)])\n#reg_parameter=\n\nfor i in np.arange(3):\n\tfor d in np.arange(10):\n\t\tfor k in np.arange(n[i]):\n\t\t\terror_train[i][d]=error_train[i][d]+(np.polyval(poly[d][i],x_train[i])[k]-y_train[i][k])**2\n\t\t\terror_test[i][d]=error_test[i][d]+(np.polyval(poly[d][i],x_test[i])[k]-y_test[i][k])**2\n\t\t\t#reg_error[i][d]=reg_error[i][d]+(poly[d][i][k])**2\n\t\tMSE_train[i][d]=(1/(float(n[i]))*error_train[i][d]\n\t\tMSE_test[i][d]=(1/float(n[i]))*error_test[i][d]\n\t\t#MSE_REG[i][d]=MSE[i][d]+reg_parameter*reg_error[i][d]\n\tplt.figure(31+i)\n\tplt.plot(np.arange(10),MSE_train[i],'ro')\n\tplt.plot(np.arange(10),MSE_test[i],'bo')\n\tplt.legend(handles=[mpatches.Patch(color='red', label='Training set'),mpatches.Patch(color='blue', label='Test set')])\n\tplt.ylabel('MSE')\n\tplt.xlabel('Polynomial degree d')\n\tplt.title('Dataset with '+str(n[i])+' datapoints')\n\tplt.savefig('MSE for training and test sets with '+str(n[i])+' points against polynomial degree.png')\n\t\n#plt.show()\n"
},
{
"alpha_fraction": 0.7242308855056763,
"alphanum_fraction": 0.7519822120666504,
"avg_line_length": 38.91139221191406,
"blob_id": "6f9e9aa6361a0e87f2b6138f3b7c826c77545f38",
"content_id": "e3eab6ab3a60e3566bab2a5c973dc12b08a7bf2a",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 6306,
"license_type": "no_license",
"max_line_length": 366,
"num_lines": 158,
"path": "/Assignment 1/Assignment-1_3.py",
"repo_name": "spacer730/Neural-Networks",
"src_encoding": "UTF-8",
"text": "import numpy as np\nimport numpy.ma as ma\nimport csv\n\ntraindigitsoflines=[[] for i in range(10)] #Store which lines belong to which digits\nntrain=np.zeros(10) #Store number of training points for each digit\n\nwith open('train_out.csv') as train_out:\n\treadtrain_out = csv.reader(train_out, delimiter=',')\n\tfor row in readtrain_out:\n\t\tfor i in range(10):\n\t\t\tif int(row[0])==i:\n\t\t\t\ttraindigitsoflines[i].append(readtrain_out.line_num) #Read out which lines belong to which digits\n\nntrain=[len(traindigitsoflines[i]) for i in range(10)] #Compute number of training points for each digit\n\n#We want to distinguish between 1 and 8. The X we will compute is the number of activated pixels. We will first compute the average activation number for 1 and 8 and then determine a good boundary between them. If the number X for a certain row is smaller than this boundary we will predict it is a 1 and if it is larger than the boundary we will predict it as an 8.\n\nactivation1=[]\nactivation8=[]\n\n#Check for each row in train_in whether it is a 1 or an 8 and store the sum of the pixel values.\nwith open('train_in.csv') as train_in:\n\treadtrain_in = csv.reader(train_in, delimiter=',')\n\tfor row in readtrain_in:\n\t\tfor i in range(ntrain[1]):\n\t\t\tif readtrain_in.line_num==traindigitsoflines[1][i]:\n\t\t\t\tcomputeactivation1=0\n\t\t\t\tfor j in range(256):\n\t\t\t\t\tcomputeactivation1+=float(row[j])\n\t\t\t\tactivation1.append(computeactivation1)\n\n\t\tfor i in range(ntrain[8]):\n\t\t\tif readtrain_in.line_num==traindigitsoflines[8][i]:\n\t\t\t\tcomputeactivation8=0\n\t\t\t\tfor j in range(256):\n\t\t\t\t\tcomputeactivation8+=float(row[j])\n\t\t\t\tactivation8.append(computeactivation8)\n\n#convert to numpy arrays so we can easily calculate mean and standard deviation\nactivation1=np.array(activation1)\nactivation8=np.array(activation8)\n\n#Compute the average activation of the digits\naverage1=np.mean(activation1)\naverage8=np.mean(activation8)\n\nstd1=np.std(activation1)\nstd8=np.std(activation8)\n\nlabda=std1/(std1+std8)\nboundary=average1+std1+(abs(average1-average8)-std1-std8)*labda\n\ntrainpredictionactivation=[]\n#Compute activation for the row if it is digit 1 or 8 and then if it is larger than boundary it is 8 and smaller is 1\nwith open('train_in.csv') as train_in:\n\treadtrain_in = csv.reader(train_in, delimiter=',')\n\tfor row in readtrain_in:\n\t\tactivation=0\n\t\tfor i in range(ntrain[1]):\n\t\t\tif readtrain_in.line_num==traindigitsoflines[1][i]:\n\t\t\t\tfor j in range(256):\n\t\t\t\t\tactivation+=float(row[j])\n\t\t\t\tif activation<boundary:\n\t\t\t\t\ttrainpredictionactivation.append([readtrain_in.line_num,1])\n\t\t\t\telse:\n\t\t\t\t\ttrainpredictionactivation.append([readtrain_in.line_num,8])\n\t\t\n\t\tfor i in range(ntrain[8]):\n\t\t\tif readtrain_in.line_num==traindigitsoflines[8][i]:\n\t\t\t\tfor j in range(256):\n\t\t\t\t\tactivation+=float(row[j])\n\t\t\t\tif activation<boundary:\n\t\t\t\t\ttrainpredictionactivation.append([readtrain_in.line_num,1])\n\t\t\t\telse:\n\t\t\t\t\ttrainpredictionactivation.append([readtrain_in.line_num,8])\n\ntraintrue=[]\ntraincounteractivation1=0\ntraincounteractivation8=0\n\n#Compare predicted digits with real digits and determine accuracy\nfor i in range(len(trainpredictionactivation)):\n\tif trainpredictionactivation[i][1]==1:\n\t\tfor j in range(len(traindigitsoflines[1])):\n\t\t\tif trainpredictionactivation[i][0]==traindigitsoflines[1][j]:\n\t\t\t\ttraincounteractivation1+=1\n\telif trainpredictionactivation[i][1]==8:\n\t\tfor j in range(len(traindigitsoflines[8])):\n\t\t\tif trainpredictionactivation[i][0]==traindigitsoflines[8][j]:\n\t\t\t\ttraincounteractivation8+=1\n\ntrainaccuracy1=traincounteractivation1/ntrain[1]\ntrainaccuracy8=traincounteractivation8/ntrain[8]\ntrainaccuracy=(traincounteractivation1+traincounteractivation8)/(ntrain[1]+ntrain[8])\n\nprint(\"The train accuracy for correctly classifying the digit 1 is \"+str(trainaccuracy1))\nprint(\"The train accuracy for correctly classifying the digit 8 is \"+str(trainaccuracy8))\nprint(\"The train accuracy for correctly classifying the digits 1 and 8 is \"+str(trainaccuracy))\n\ntestdigitsoflines=[[] for i in range(10)] #Store which lines belong to which digits\nntest=np.zeros(10) #Store number of training points for each digit\n\nwith open('test_out.csv') as test_out:\n\treadtest_out = csv.reader(test_out, delimiter=',')\n\tfor row in readtest_out:\n\t\tfor i in range(10):\n\t\t\tif int(row[0])==i:\n\t\t\t\ttestdigitsoflines[i].append(readtest_out.line_num) #Read out which lines belong to which digits\n\nntest=[len(testdigitsoflines[i]) for i in range(10)] #Compute number of training points for each digit\n\ntestpredictionactivation=[]\n#Compute activation for the row if it is digit 1 or 8 and then if it is larger than boundary it is 8 and smaller is 1\nwith open('test_in.csv') as test_in:\n\treadtest_in = csv.reader(test_in, delimiter=',')\n\tfor row in readtest_in:\n\t\tactivation=0\n\t\tfor i in range(ntest[1]):\n\t\t\tif readtest_in.line_num==testdigitsoflines[1][i]:\n\t\t\t\tfor j in range(256):\n\t\t\t\t\tactivation+=float(row[j])\n\t\t\t\tif activation<boundary:\n\t\t\t\t\ttestpredictionactivation.append([readtest_in.line_num,1])\n\t\t\t\telse:\n\t\t\t\t\ttestpredictionactivation.append([readtest_in.line_num,8])\n\t\t\n\t\tfor i in range(ntest[8]):\n\t\t\tif readtest_in.line_num==testdigitsoflines[8][i]:\n\t\t\t\tfor j in range(256):\n\t\t\t\t\tactivation+=float(row[j])\n\t\t\t\tif activation<boundary:\n\t\t\t\t\ttestpredictionactivation.append([readtest_in.line_num,1])\n\t\t\t\telse:\n\t\t\t\t\ttestpredictionactivation.append([readtest_in.line_num,8])\n\ntesttrue=[]\ntestcounteractivation1=0\ntestcounteractivation8=0\n\n#Compare predicted digits with real digits and determine accuracy\nfor i in range(len(testpredictionactivation)):\n\tif testpredictionactivation[i][1]==1:\n\t\tfor j in range(len(testdigitsoflines[1])):\n\t\t\tif testpredictionactivation[i][0]==testdigitsoflines[1][j]:\n\t\t\t\ttestcounteractivation1+=1\n\telif testpredictionactivation[i][1]==8:\n\t\tfor j in range(len(testdigitsoflines[8])):\n\t\t\tif testpredictionactivation[i][0]==testdigitsoflines[8][j]:\n\t\t\t\ttestcounteractivation8+=1\n\ntestaccuracy1=testcounteractivation1/ntest[1]\ntestaccuracy8=testcounteractivation8/ntest[8]\ntestaccuracy=(testcounteractivation1+testcounteractivation8)/(ntest[1]+ntest[8])\n\nprint(\"The test accuracy for correctly classifying the digit 1 is \"+str(testaccuracy1))\nprint(\"The test accuracy for correctly classifying the digit 8 is \"+str(testaccuracy8))\nprint(\"The test accuracy for correctly classifying the digits 1 and 8 is \"+str(testaccuracy))\n"
},
{
"alpha_fraction": 0.6769771575927734,
"alphanum_fraction": 0.6949033141136169,
"avg_line_length": 43.8870964050293,
"blob_id": "6274ad71d411c869152d90a1c332084c4d4b91b9",
"content_id": "29485dc404638bdfe02ce896549178a488f53df9",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 2845,
"license_type": "no_license",
"max_line_length": 181,
"num_lines": 62,
"path": "/Assignment 1/Final/Assignment-1-1-final.py",
"repo_name": "spacer730/Neural-Networks",
"src_encoding": "UTF-8",
"text": "import numpy as np\r\nimport numpy.ma as ma\r\nimport matplotlib.pyplot as plt\r\nimport csv\r\n\r\n# List for the ten digits, each containing list of the 256 values of each image\r\ndigitsoflines=[[] for i in range(10)]\r\n# Array for calculating the centers of all 10 digits\r\nSum=[np.zeros(256) for i in range(10)] #Store sum of all training sets for each digit\r\n# Array for coordinates of the center of each digit\r\nc=[np.zeros(256) for i in range(10)]\r\n# Radii of all digits in phase space\r\nr=np.zeros(10)\r\n# Number of images depicting digits in training data\r\nn=np.zeros(10) \r\n\r\n# Compute number of training points for each digit \r\nwith open('data/train_out.csv') as train_out:\r\n\treadtrain_out = csv.reader(train_out, delimiter=',')\r\n\tfor row in readtrain_out:\r\n\t\tfor i in range(10):\r\n\t\t\tif int(row[0])==i:\t\t\t\r\n\t\t\t\tdigitsoflines[i].append(readtrain_out.line_num)\r\nn=[len(digitsoflines[i]) for i in range(10)] \r\n\r\n# Calculate array \"Sum\" to get coordinates of center for each digit\r\nwith open('data/train_in.csv') as train_in:\r\n\treadtrain_in = csv.reader(train_in, delimiter=',')\r\n\tfor row in readtrain_in:\r\n\t\tfor i in range(10): #Find out which digit the row is\r\n\t\t\tfor j in range(n[i]):\r\n\t\t\t\tif readtrain_in.line_num==digitsoflines[i][j]:\r\n\t\t\t\t\t#Converts the string list row in to int list and adds to sum\r\n\t\t\t\t\tSum[i]=Sum[i]+np.array(list(map(float, row))) \r\n\r\n# Compute the center of each digit\r\nfor i in range(10):\r\n\tc[i]=Sum[i]/n[i]\r\n\r\n# Compute distance of image to its center in \"r\" and update value if the new value is bigger\r\n# Thus we end up with the highest radius in the end for all ten digits\r\nwith open('data/train_in.csv') as train_in:\r\n\treadtrain_in = csv.reader(train_in, delimiter=',')\r\n\tfor row in readtrain_in:\r\n\t\tfor i in range(10): #Find out which digit the row is\r\n\t\t\tfor j in range(n[i]):\r\n\t\t\t\tif readtrain_in.line_num==digitsoflines[i][j]:\r\n\t\t\t\t\tif np.dot(c[i]-np.array(list(map(float,row))),c[i]-np.array(list(map(float,row))))>r[i]:\r\n\t\t\t\t\t\tr[i]=np.dot(c[i]-np.array(list(map(float,row))),c[i]-np.array(list(map(float,row))))\r\n\r\n# Array for distances between the 10 centers\r\ndistancematrix=np.zeros([10,10])\r\n\r\n# Calculate distances between the 10 centers and print out the distance to the nearest center and its resp. digit\r\nfor i in range(10):\r\n\tfor j in range(10):\r\n\t\tdistancematrix[i][j]=np.dot(c[i]-c[j],c[i]-c[j])\r\n\tprint(\"The closest digit center between digit center \"+str(i)+\" is \"+str(np.argmin(ma.array(distancematrix[i],mask=np.identity(10)[i])))) #Use masked array to ignore 0 selfdistance\r\n\tprint(\"with distance: \"+str(np.amin(ma.array(distancematrix[i],mask=np.identity(10)[i])))) #Minimum distance between a digit center and other digit centers excluding itself\r\n\tprint(\"The radius of digit\", str(i), \"is\", str(r[i]), \"\\n\")\r\n\r\nprint(\"Thus digit 7 and 9 seem to be the hardest to differentiate from one another\")\r\n"
},
{
"alpha_fraction": 0.7235265970230103,
"alphanum_fraction": 0.7350263595581055,
"avg_line_length": 39.7599983215332,
"blob_id": "b1072e89aec236c5bca3a27ad609a803ba062ae3",
"content_id": "a6ec1a2233930d40f472f57ba2ee7b561891c1b3",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 4174,
"license_type": "no_license",
"max_line_length": 160,
"num_lines": 100,
"path": "/Assignment 1/Final/Assignment-1-4-final.py",
"repo_name": "spacer730/Neural-Networks",
"src_encoding": "UTF-8",
"text": "import numpy as np\r\nimport numpy.ma as ma\r\nimport csv\r\nimport time\r\n\r\ntraindigitsoflines=[] #Store which lines belong to which digits\r\nstart = time.time()\r\n\r\n#Read out which lines belong to which digits\r\nwith open('data/train_out.csv') as train_out:\r\n\treadtrain_out = csv.reader(train_out, delimiter=',')\r\n\tfor row in readtrain_out:\r\n\t\ttraindigitsoflines.append([readtrain_out.line_num,int(row[0])]) \r\n\r\n#Initialize 256+1 times 10 random weights for the 256 nodes + bias for the 10 output nodes.\r\nweights=np.random.randn(10,257)\r\n\r\nx_train=[[] for i in range(len(traindigitsoflines))]\r\ny_train=[[] for i in range(len(traindigitsoflines))]\r\n\r\n#We read in the training data and store it in x_train as row vectors with length 257 and make the first prediction y_train with the randomly initialized weights\r\nwith open('data/train_in.csv') as train_in:\r\n\treadtrain_in = csv.reader(train_in, delimiter=',')\r\n\tfor row in readtrain_in:\r\n\t\tx_train[readtrain_in.line_num-1].extend([1.]) #First term is for the bias\r\n\t\tx_train[readtrain_in.line_num-1].extend(list(map(float,row)))\r\n\t\ty_train[readtrain_in.line_num-1].extend(np.dot(weights,x_train[readtrain_in.line_num-1]))\r\n\r\n#Store the index of the misclassified data for the training set\r\nmisclassified=[]\r\nfor i in range(len(traindigitsoflines)):\r\n\tif np.argmax(y_train[i])!=traindigitsoflines[i][1]:\r\n\t\tmisclassified.append(i)\r\n\r\n#Keep updating weights until there are no more misclassified examples\r\niterationcounter=0\r\nwhile len(misclassified)!=0:\r\n\titerationcounter+=1\r\n\t#pick random sample out of misclassified set\r\n\tsample=np.random.choice(misclassified)\r\n\t#train the weights for the sample until the max of the output vector is equal to the digit\r\n\twhile np.argmax(y_train[sample])!=traindigitsoflines[sample][1]:\r\n\t\tfor j in [k for k in range(10) if k!=traindigitsoflines[sample][1]]:\r\n\t\t\tif y_train[sample][j]>=y_train[sample][traindigitsoflines[sample][1]]:\r\n\t\t\t\tweights[j]-=x_train[sample]\r\n\t\tweights[traindigitsoflines[sample][1]]+=x_train[sample]\r\n\t\ty_train[sample]=np.dot(weights,x_train[sample])\r\n\t\r\n\t#Update predictions with new weights\r\n\tfor i in range(len(traindigitsoflines)):\r\n\t\ty_train[i]=np.dot(weights,x_train[i])\r\n\r\n\t#Update the index of the misclassified data for the training set with the updated weights\r\n\tmisclassified=[]\r\n\tfor i in range(len(traindigitsoflines)):\r\n\t\tif np.argmax(y_train[i])!=traindigitsoflines[i][1]:\r\n\t\t\tmisclassified.append(i)\r\n\r\nprint('Converged on training set in '+str(iterationcounter)+' steps')\r\n\r\n#Store the predictions from the training set with the final weights\r\ntrainprediction=[]\r\nfor i in range(len(traindigitsoflines)):\r\n\ttrainprediction.append(np.argmax(y_train[i]))\r\n\r\n#Determine accuracy on training set\r\ntraincounter=0\r\nfor i in range(len(traindigitsoflines)):\r\n\tif trainprediction[i]==traindigitsoflines[i][1]:\r\n\t\ttraincounter+=1\r\ntrainaccuracy=traincounter/len(traindigitsoflines)\r\nprint('Accuracy on training set is: '+str(trainaccuracy))\r\n\r\n#Store which lines belong to which digits for test set\r\ntestdigitsoflines=[] \r\nwith open('data/test_out.csv') as test_out:\r\n\treadtest_out = csv.reader(test_out, delimiter=',')\r\n\tfor row in readtest_out:\r\n\t\ttestdigitsoflines.append([readtest_out.line_num,int(row[0])]) #Read out which lines belong to which digits\r\n\r\nx_test=[[] for i in range(len(testdigitsoflines))]\r\ny_test=[[] for i in range(len(testdigitsoflines))]\r\n\r\n#Read in data for test set and determine digit with the determined weights from the training set\r\nwith open('data/test_in.csv') as test_in:\r\n\treadtest_in = csv.reader(test_in, delimiter=',')\r\n\tfor row in readtest_in:\r\n\t\tx_test[readtest_in.line_num-1].extend([1.]) #First term is for the bias\r\n\t\tx_test[readtest_in.line_num-1].extend(list(map(float,row)))\r\n\t\ty_test[readtest_in.line_num-1].extend(np.dot(weights,x_test[readtest_in.line_num-1]))\r\ntestprediction=[np.argmax(y_test[i]) for i in range(len(y_test))]\r\n\r\n#Determine accuracy on test set\r\ntestcounter=0\r\nfor i in range(len(testdigitsoflines)):\r\n\tif testprediction[i]==testdigitsoflines[i][1]:\r\n\t\ttestcounter+=1\r\ntestaccuracy=testcounter/len(testdigitsoflines)\r\nprint('Accuracy on test set is: '+str(testaccuracy))\r\nprint (time.time()-start)"
},
{
"alpha_fraction": 0.7636833786964417,
"alphanum_fraction": 0.775894284248352,
"avg_line_length": 45.09933853149414,
"blob_id": "9a53fa2211f78d4a2a315e31fc4d851034ef9722",
"content_id": "b89e43f91c6beeebe54445bdce0b34bb932c3bd7",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 6961,
"license_type": "no_license",
"max_line_length": 146,
"num_lines": 151,
"path": "/Assignment 1/Assignment-1_2.py",
"repo_name": "spacer730/Neural-Networks",
"src_encoding": "UTF-8",
"text": "import numpy as np\nimport numpy.ma as ma\nfrom sklearn.metrics import confusion_matrix\nimport sklearn\nimport matplotlib.pyplot as plt\nimport csv\n\ndigitsoflines=[[] for i in range(10)] #Store which lines belong to which digits\nSum=[np.zeros(256) for i in range(10)] #Store sum of all training sets for each digit\nc=[np.zeros(256) for i in range(10)] #Store the centers of each digit\nn=np.zeros(10) #Store number of training points for each digit\n\nwith open('train_out.csv') as train_out:\n\treadtrain_out = csv.reader(train_out, delimiter=',')\n\tfor row in readtrain_out:\n\t\tfor i in range(10):\n\t\t\tif int(row[0])==i:\n\t\t\t\tdigitsoflines[i].append(readtrain_out.line_num) #Read out which lines belong to which digits\n\nn=[len(digitsoflines[i]) for i in range(10)] #Compute number of training points for each digit\n\nwith open('train_in.csv') as train_in:\n\treadtrain_in = csv.reader(train_in, delimiter=',')\n\tfor row in readtrain_in:\n\t\tfor i in range(10): #Find out which digit the row is\n\t\t\tfor j in range(n[i]):\n\t\t\t\tif readtrain_in.line_num==digitsoflines[i][j]:\n\t\t\t\t\tSum[i]=Sum[i]+np.array(list(map(float, row))) #Converts the string list row in to int list and adds to sum\n\nfor i in range(10):\n\tc[i]=Sum[i]/n[i] #Compute the center of each digit by dividing the sum of all the training points by the number of training points for each digit\n\ntrainpredictioneuclid=[]\ntrainpredictionmanhattan=[]\ntrainpredictioncosine=[]\ntrainpredictioncorrelation=[]\n\t\t\nwith open('train_in.csv') as train_in: #Compute distance between centers and data point and prediction is closest center\n\treadtrain_in = csv.reader(train_in, delimiter=',')\n\tfor row in readtrain_in:\n\t\tdisteuclid=[]\n\t\tdistmanhattan=[]\n\t\tdistcosine=[]\n\t\tdistcorrelation=[]\n\t\tfor i in range(10):\n\t\t\tdisteuclid.append(np.dot(c[i]-np.array(list(map(float,row))),c[i]-np.array(list(map(float,row)))))\n\t\t\tdistmanhattan.append(sklearn.metrics.pairwise.pairwise_distances([c[i]],[np.array(list(map(float,row)))],metric='manhattan'))\n\t\t\tdistcosine.append(sklearn.metrics.pairwise.pairwise_distances([c[i]],[np.array(list(map(float,row)))],metric='cosine'))\n\t\t\tdistcorrelation.append(sklearn.metrics.pairwise.pairwise_distances([c[i]],[np.array(list(map(float,row)))],metric='correlation'))\n\t\t\t\n\t\ttrainpredictioneuclid.append(np.argmin(disteuclid))\n\t\ttrainpredictionmanhattan.append(np.argmin(distmanhattan))\n\t\ttrainpredictioncosine.append(np.argmin(distcosine))\n\t\ttrainpredictioncorrelation.append(np.argmin(distcorrelation))\n\ntraintrue=[]\ntraincountereuclid=0\ntraincountermanhattan=0\ntraincountercosine=0\ntraincountercorrelation=0\n\nwith open('train_out.csv') as train_out:\n\treadtrain_out = csv.reader(train_out, delimiter=',')\n\tfor row in readtrain_out:\n\t\ttraintrue.append(int(row[0]))\n\t\tif int(row[0])==trainpredictioneuclid[readtrain_out.line_num-1]:\n\t\t\ttraincountereuclid+=1\n\t\tif int(row[0])==trainpredictionmanhattan[readtrain_out.line_num-1]:\n\t\t\ttraincountermanhattan+=1\n\t\tif int(row[0])==trainpredictioncosine[readtrain_out.line_num-1]:\n\t\t\ttraincountercosine+=1\n\t\tif int(row[0])==trainpredictioncorrelation[readtrain_out.line_num-1]:\n\t\t\ttraincountercorrelation+=1\n\nprint(\"Confusion matrix for training set using euclidean distance:\")\nprint(confusion_matrix(trainpredictioneuclid,traintrue))\nprint(\"Confusion matrix for training set using manhattan distance:\")\nprint(confusion_matrix(trainpredictionmanhattan,traintrue))\nprint(\"Confusion matrix for training set using cosine distance:\")\nprint(confusion_matrix(trainpredictioncosine,traintrue))\nprint(\"Confusion matrix for training set using correlation distance:\")\nprint(confusion_matrix(trainpredictioncorrelation,traintrue))\n\nprint(\"The accuracy of the euclidean distance algorithm on the training set is:\")\nprint(100*traincountereuclid/len(trainpredictioneuclid))\nprint(\"The accuracy of the manhattan distance algorithm on the training set is:\")\nprint(100*traincountermanhattan/len(trainpredictionmanhattan))\nprint(\"The accuracy of the cosine distance algorithm on the training set is:\")\nprint(100*traincountercosine/len(trainpredictioncosine))\nprint(\"The accuracy of the correlation distance algorithm on the training set is:\")\nprint(100*traincountercorrelation/len(trainpredictioncorrelation))\n\ntestpredictioneuclid=[]\ntestpredictionmanhattan=[]\ntestpredictioncosine=[]\ntestpredictioncorrelation=[]\n\nwith open('test_in.csv') as test_in: #Compute distance between centers and data point and prediction is closest center\n\treadtest_in = csv.reader(test_in, delimiter=',')\n\tfor row in readtest_in:\n\t\tdisteuclid=[]\n\t\tdistmanhattan=[]\n\t\tdistcosine=[]\n\t\tdistcorrelation=[]\n\t\tfor i in range(10):\n\t\t\tdisteuclid.append(np.dot(c[i]-np.array(list(map(float,row))),c[i]-np.array(list(map(float,row)))))\n\t\t\tdistmanhattan.append(sklearn.metrics.pairwise.pairwise_distances([c[i]],[np.array(list(map(float,row)))],metric='manhattan'))\n\t\t\tdistcosine.append(sklearn.metrics.pairwise.pairwise_distances([c[i]],[np.array(list(map(float,row)))],metric='cosine'))\n\t\t\tdistcorrelation.append(sklearn.metrics.pairwise.pairwise_distances([c[i]],[np.array(list(map(float,row)))],metric='correlation'))\n\t\t\t\n\t\ttestpredictioneuclid.append(np.argmin(disteuclid))\n\t\ttestpredictionmanhattan.append(np.argmin(distmanhattan))\n\t\ttestpredictioncosine.append(np.argmin(distcosine))\n\t\ttestpredictioncorrelation.append(np.argmin(distcorrelation))\n\ntesttrue=[]\ntestcountereuclid=0\ntestcountermanhattan=0\ntestcountercosine=0\ntestcountercorrelation=0\n\nwith open('test_out.csv') as test_out:\n\treadtest_out = csv.reader(test_out, delimiter=',')\n\tfor row in readtest_out:\n\t\ttesttrue.append(int(row[0]))\n\t\tif int(row[0])==testpredictioneuclid[readtest_out.line_num-1]:\n\t\t\ttestcountereuclid+=1\n\t\tif int(row[0])==testpredictionmanhattan[readtest_out.line_num-1]:\n\t\t\ttestcountermanhattan+=1\n\t\tif int(row[0])==testpredictioncosine[readtest_out.line_num-1]:\n\t\t\ttestcountercosine+=1\n\t\tif int(row[0])==testpredictioncorrelation[readtest_out.line_num-1]:\n\t\t\ttestcountercorrelation+=1\n\nprint(\"Confusion matrix for test set using euclidean distance:\")\nprint(confusion_matrix(testpredictioneuclid,testtrue))\nprint(\"Confusion matrix for test set using manhattan distance:\")\nprint(confusion_matrix(testpredictionmanhattan,testtrue))\nprint(\"Confusion matrix for test set using cosine distance:\")\nprint(confusion_matrix(testpredictioncosine,testtrue))\nprint(\"Confusion matrix for test set using correlation distance:\")\nprint(confusion_matrix(testpredictioncorrelation,testtrue))\n\nprint(\"The accuracy of the euclidean distance algorithm on the test set is:\")\nprint(100*testcountereuclid/len(testpredictioneuclid))\nprint(\"The accuracy of the manhattan distance algorithm on the test set is:\")\nprint(100*testcountermanhattan/len(testpredictionmanhattan))\nprint(\"The accuracy of the cosine distance algorithm on the test set is:\")\nprint(100*testcountercosine/len(testpredictioncosine))\nprint(\"The accuracy of the correlation distance algorithm on the test set is:\")\nprint(100*testcountercorrelation/len(testpredictioncorrelation))\n"
},
{
"alpha_fraction": 0.7411853075027466,
"alphanum_fraction": 0.7531883120536804,
"avg_line_length": 40.226802825927734,
"blob_id": "7ff820b06a7b22ce50655b743faa1c1eab6fb607",
"content_id": "064143cc2f43fac25e441d9c530a82345a836424",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 3999,
"license_type": "no_license",
"max_line_length": 160,
"num_lines": 97,
"path": "/Assignment 1/Assignment-1_4.py",
"repo_name": "spacer730/Neural-Networks",
"src_encoding": "UTF-8",
"text": "import numpy as np\nimport numpy.ma as ma\nimport csv\n\ntraindigitsoflines=[] #Store which lines belong to which digits\n\n#Read out which lines belong to which digits\nwith open('train_out.csv') as train_out:\n\treadtrain_out = csv.reader(train_out, delimiter=',')\n\tfor row in readtrain_out:\n\t\ttraindigitsoflines.append([readtrain_out.line_num,int(row[0])]) \n\n#Iniatlize 256+1 times 10 random weights for the 256 nodes + bias for the 10 output nodes.\nweights=np.random.randn(10,257)\n\nx_train=[[] for i in range(len(traindigitsoflines))]\ny_train=[[] for i in range(len(traindigitsoflines))]\n\n#We read in the training data and store it in x_train as row vectors with length 257 and make the first prediction y_train with the randomly initialized weights\nwith open('train_in.csv') as train_in:\n\treadtrain_in = csv.reader(train_in, delimiter=',')\n\tfor row in readtrain_in:\n\t\tx_train[readtrain_in.line_num-1].extend([1.]) #First term is for the bias\n\t\tx_train[readtrain_in.line_num-1].extend(list(map(float,row)))\n\t\ty_train[readtrain_in.line_num-1].extend(np.dot(weights,x_train[readtrain_in.line_num-1]))\n\n#Store the index of the misclassified data for the training set\nmisclassified=[]\nfor i in range(len(traindigitsoflines)):\n\tif np.argmax(y_train[i])!=traindigitsoflines[i][1]:\n\t\tmisclassified.append(i)\n\n#Keep updating weights untill there are no more misclassified examples\niterationcounter=0\nwhile len(misclassified)!=0:\n\titerationcounter+=1\n\t#pick random sample out of misclassified set\n\tsample=np.random.choice(misclassified)\n\t#train the weights for the sample untill the max of the output vector is equal to the digit\n\twhile np.argmax(y_train[sample])!=traindigitsoflines[sample][1]:\n\t\tfor j in [k for k in range(10) if k!=traindigitsoflines[sample][1]]:\n\t\t\tif y_train[sample][j]>=y_train[sample][traindigitsoflines[sample][1]]:\n\t\t\t\tweights[j]-=x_train[sample]\n\t\tweights[traindigitsoflines[sample][1]]+=x_train[sample]\n\t\ty_train[sample]=np.dot(weights,x_train[sample])\n\t\n\t#Update predictions with new weights\n\tfor i in range(len(traindigitsoflines)):\n\t\ty_train[i]=np.dot(weights,x_train[i])\n\n\t#Update the index of the misclassified data for the training set with the updated weights\n\tmisclassified=[]\n\tfor i in range(len(traindigitsoflines)):\n\t\tif np.argmax(y_train[i])!=traindigitsoflines[i][1]:\n\t\t\tmisclassified.append(i)\n\nprint('Converged on training set in '+str(iterationcounter)+' steps')\n\n#Store the predictions from the training set with the final weights\ntrainprediction=[]\nfor i in range(len(traindigitsoflines)):\n\ttrainprediction.append(np.argmax(y_train[i]))\n\n#Determine accuracy on training set\ntraincounter=0\nfor i in range(len(traindigitsoflines)):\n\tif trainprediction[i]==traindigitsoflines[i][1]:\n\t\ttraincounter+=1\ntrainaccuracy=traincounter/len(traindigitsoflines)\nprint('Accuracy on training set is: '+str(trainaccuracy))\n\n#Store which lines belong to which digits for test set\ntestdigitsoflines=[] \nwith open('test_out.csv') as test_out:\n\treadtest_out = csv.reader(test_out, delimiter=',')\n\tfor row in readtest_out:\n\t\ttestdigitsoflines.append([readtest_out.line_num,int(row[0])]) #Read out which lines belong to which digits\n\nx_test=[[] for i in range(len(testdigitsoflines))]\ny_test=[[] for i in range(len(testdigitsoflines))]\n\n#Read in data for test set and determine digit with the determined weights from the training set\nwith open('test_in.csv') as test_in:\n\treadtest_in = csv.reader(test_in, delimiter=',')\n\tfor row in readtest_in:\n\t\tx_test[readtest_in.line_num-1].extend([1.]) #First term is for the bias\n\t\tx_test[readtest_in.line_num-1].extend(list(map(float,row)))\n\t\ty_test[readtest_in.line_num-1].extend(np.dot(weights,x_test[readtest_in.line_num-1]))\ntestprediction=[np.argmax(y_test[i]) for i in range(len(y_test))]\n\n#Determine accuracy on test set\ntestcounter=0\nfor i in range(len(testdigitsoflines)):\n\tif testprediction[i]==testdigitsoflines[i][1]:\n\t\ttestcounter+=1\ntestaccuracy=testcounter/len(testdigitsoflines)\nprint('Accuracy on test set is: '+str(testaccuracy))\n"
},
{
"alpha_fraction": 0.6947535872459412,
"alphanum_fraction": 0.7114467620849609,
"avg_line_length": 48.33333206176758,
"blob_id": "8903d52956661feebe6a140d16331650a0298dea",
"content_id": "a9cb8d9cd0de2781facced94411045ec5a11b59b",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 2516,
"license_type": "no_license",
"max_line_length": 181,
"num_lines": 51,
"path": "/Assignment 1/Assignment-1_1.py",
"repo_name": "spacer730/Neural-Networks",
"src_encoding": "UTF-8",
"text": "import numpy as np\nimport numpy.ma as ma\nimport matplotlib.pyplot as plt\nimport csv\n\ndigitsoflines=[[] for i in range(10)] #Store which lines belong to which digits\nSum=[np.zeros(256) for i in range(10)] #Store sum of all training sets for each digit\nc=[np.zeros(256) for i in range(10)] #Store the centers of each digit\nr=np.zeros(10) #Store radius of each digit\nn=np.zeros(10) #Store number of training points for each digit\n\nwith open('train_out.csv') as train_out:\n\treadtrain_out = csv.reader(train_out, delimiter=',')\n\tfor row in readtrain_out:\n\t\tfor i in range(10):\n\t\t\tif int(row[0])==i:\n\t\t\t\tdigitsoflines[i].append(readtrain_out.line_num) #Read out which lines belong to which digits\n\nn=[len(digitsoflines[i]) for i in range(10)] #Compute number of training points for each digit\n\nwith open('train_in.csv') as train_in:\n\treadtrain_in = csv.reader(train_in, delimiter=',')\n\tfor row in readtrain_in:\n\t\tfor i in range(10): #Find out which digit the row is\n\t\t\tfor j in range(n[i]):\n\t\t\t\tif readtrain_in.line_num==digitsoflines[i][j]:\n\t\t\t\t\tSum[i]=Sum[i]+np.array(list(map(float, row))) #Converts the string list row in to int list and adds to sum\n\nfor i in range(10):\n\tc[i]=Sum[i]/n[i] #Compute the center of each digit by dividing the sum of all the training points by the number of training points for each digit\n\t\t\nwith open('train_in.csv') as train_in:\n\treadtrain_in = csv.reader(train_in, delimiter=',')\n\tfor row in readtrain_in:\n\t\tfor i in range(10): #Find out which digit the row is\n\t\t\tfor j in range(n[i]):\n\t\t\t\tif readtrain_in.line_num==digitsoflines[i][j]:\n\t\t\t\t\tif np.dot(c[i]-np.array(list(map(float,row))),c[i]-np.array(list(map(float,row))))>r[i]: #computer distance between center and datapoint\n\t\t\t\t\t\tr[i]=np.dot(c[i]-np.array(list(map(float,row))),c[i]-np.array(list(map(float,row)))) #Update if distance bigger than previous\n\ndistancematrix=np.zeros([10,10])\n\nfor i in range(10):\n\tfor j in range(10):\n\t\tdistancematrix[i][j]=np.dot(c[i]-c[j],c[i]-c[j]) #Computer distances between the centers\n\tprint(\"The closest digit center between digit center \"+str(i)+\" is \"+str(np.argmin(ma.array(distancematrix[i],mask=np.identity(10)[i])))) #Use masked array to ignore 0 selfdistance\n\tprint(\"with distance: \"+str(np.amin(ma.array(distancematrix[i],mask=np.identity(10)[i])))) #Minimum distance between a digit center and other digit centers excluding itself\n\tprint(\"The radius of digit \"+str(i)+\" is \"+str(r[i]))\n\tprint(\"\")\n\nprint(\"Thus digit 7 and 9 seem to be the hardest to differentiate from one another\")\n"
},
{
"alpha_fraction": 0.7036182880401611,
"alphanum_fraction": 0.7370621562004089,
"avg_line_length": 39.0982666015625,
"blob_id": "b7541a0e39cca8ebe0c53735e472e2e23c2c2a99",
"content_id": "b5c3e5f04d9aca59ceebb74a8b405bb414d75646",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 6937,
"license_type": "no_license",
"max_line_length": 338,
"num_lines": 173,
"path": "/Assignment 1/Assignment-1_3_Bayes.py",
"repo_name": "spacer730/Neural-Networks",
"src_encoding": "UTF-8",
"text": "import numpy as np\nimport numpy.ma as ma\nimport matplotlib.pyplot as plt\nimport csv\nfrom sklearn.neighbors import KernelDensity\n\ntraindigitsoflines=[[] for i in range(10)] #Store which lines belong to which digits\nntrain=np.zeros(10) #Store number of training points for each digit\n\nwith open('train_out.csv') as train_out:\n\treadtrain_out = csv.reader(train_out, delimiter=',')\n\tfor row in readtrain_out:\n\t\tfor i in range(10):\n\t\t\tif int(row[0])==i:\n\t\t\t\ttraindigitsoflines[i].append(readtrain_out.line_num) #Read out which lines belong to which digits\n\nntrain=[len(traindigitsoflines[i]) for i in range(10)] #Compute number of training points for each digit\n\n#We want to distinguish between 1 and 8. The X we will compute is the number of activated pixels. We will first compute P(C1|X) and P(C8|X) to determine a good boundary between them. If the number X for a certain row is smaller than this boundary we will predict it is a 1 and if it is larger than the boundary we will predict it is an 8.\n\nactivation1=[]\nactivation8=[]\n\n#Check for each row in train_in whether it is a 1 or an 8 and store the sum of the pixel values.\nwith open('train_in.csv') as train_in:\n\treadtrain_in = csv.reader(train_in, delimiter=',')\n\tfor row in readtrain_in:\n\t\tfor i in range(ntrain[1]):\n\t\t\tif readtrain_in.line_num==traindigitsoflines[1][i]:\n\t\t\t\tcomputeactivation1=0\n\t\t\t\tfor j in range(256):\n\t\t\t\t\tcomputeactivation1+=float(row[j])\n\t\t\t\tactivation1.append(computeactivation1)\n\n\t\tfor i in range(ntrain[8]):\n\t\t\tif readtrain_in.line_num==traindigitsoflines[8][i]:\n\t\t\t\tcomputeactivation8=0\n\t\t\t\tfor j in range(256):\n\t\t\t\t\tcomputeactivation8+=float(row[j])\n\t\t\t\tactivation8.append(computeactivation8)\n\tactivation1=np.array(activation1)\n\tactivation8=np.array(activation8)\n\n\n#Use Bayes theorem: P(C1|X)=P(X|C1)*P(C1)\nP_C1=ntrain[1]/(ntrain[1]+ntrain[8])\nP_C8=ntrain[8]/(ntrain[1]+ntrain[8])\n\nP_X_C1, bins1 = np.histogram(activation1, range=[-250,0], density=True, bins=30)\nwidths1 = np.diff(bins1)\nP_C1_X = P_X_C1 * P_C1\n\nP_X_C8, bins8 = np.histogram(activation8, range=[-250,0], density=True, bins=30)\nwidths8 = np.diff(bins8)\nP_C8_X = P_X_C8 * P_C8\n\np1=plt.bar(bins1[:-1], P_C1_X, widths1, alpha=0.5)\np8=plt.bar(bins8[:-1], P_C8_X, widths8, alpha=0.5)\nplt.legend((p1[0], p8[0]), ('P(C1|X)', 'P(C8|X)'))\nplt.xlabel('X')\nplt.ylabel('P(C|X)')\nplt.savefig('Histograms')\n\n#When P_C8_X=P_C1_X we have the boundary, but because we determine the probabilities from histograms they are not continuous. So the criteria we use is that they lie very close to each other.\nfor i in range(len(P_C1_X)):\n\tif abs(P_C8_X[i]-P_C1_X[i])<0.0008 and P_C1_X[i]!=0 and P_C8_X[i]!=0:\n\t\tboundary=bins1[i]\n\n#Compute activation for the row if it is digit 1 or 8 and then if it is larger than boundary it is 8 and smaller is 1\ntrainpredictionactivation=[]\nwith open('train_in.csv') as train_in:\n\treadtrain_in = csv.reader(train_in, delimiter=',')\n\tfor row in readtrain_in:\n\t\tactivation=0\n\t\tfor i in range(ntrain[1]):\n\t\t\tif readtrain_in.line_num==traindigitsoflines[1][i]:\n\t\t\t\tfor j in range(256):\n\t\t\t\t\tactivation+=float(row[j])\n\t\t\t\tif activation<boundary:\n\t\t\t\t\ttrainpredictionactivation.append([readtrain_in.line_num,1])\n\t\t\t\telse:\n\t\t\t\t\ttrainpredictionactivation.append([readtrain_in.line_num,8])\n\t\t\n\t\tfor i in range(ntrain[8]):\n\t\t\tif readtrain_in.line_num==traindigitsoflines[8][i]:\n\t\t\t\tfor j in range(256):\n\t\t\t\t\tactivation+=float(row[j])\n\t\t\t\tif activation<boundary:\n\t\t\t\t\ttrainpredictionactivation.append([readtrain_in.line_num,1])\n\t\t\t\telse:\n\t\t\t\t\ttrainpredictionactivation.append([readtrain_in.line_num,8])\n\ntraintrue=[]\ntraincounteractivation1=0\ntraincounteractivation8=0\n\n#Compare predicted digits with real digits and determine accuracy\nfor i in range(len(trainpredictionactivation)):\n\tif trainpredictionactivation[i][1]==1:\n\t\tfor j in range(len(traindigitsoflines[1])):\n\t\t\tif trainpredictionactivation[i][0]==traindigitsoflines[1][j]:\n\t\t\t\ttraincounteractivation1+=1\n\telif trainpredictionactivation[i][1]==8:\n\t\tfor j in range(len(traindigitsoflines[8])):\n\t\t\tif trainpredictionactivation[i][0]==traindigitsoflines[8][j]:\n\t\t\t\ttraincounteractivation8+=1\n\ntrainaccuracy1=traincounteractivation1/ntrain[1]\ntrainaccuracy8=traincounteractivation8/ntrain[8]\ntrainaccuracy=(traincounteractivation1+traincounteractivation8)/(ntrain[1]+ntrain[8])\n\nprint(\"The train accuracy for correctly classifying the digit 1 is \"+str(trainaccuracy1))\nprint(\"The train accuracy for correctly classifying the digit 8 is \"+str(trainaccuracy8))\nprint(\"The train accuracy for correctly classifying the digits 1 and 8 is \"+str(trainaccuracy))\n\ntestdigitsoflines=[[] for i in range(10)] #Store which lines belong to which digits\nntest=np.zeros(10) #Store number of training points for each digit\n\nwith open('test_out.csv') as test_out:\n\treadtest_out = csv.reader(test_out, delimiter=',')\n\tfor row in readtest_out:\n\t\tfor i in range(10):\n\t\t\tif int(row[0])==i:\n\t\t\t\ttestdigitsoflines[i].append(readtest_out.line_num) #Read out which lines belong to which digits\n\nntest=[len(testdigitsoflines[i]) for i in range(10)] #Compute number of training points for each digit\n\n#Compute activation for the row if it is digit 1 or 8 and then if it is larger than boundary it is 8 and smaller is 1\ntestpredictionactivation=[]\nwith open('test_in.csv') as test_in:\n\treadtest_in = csv.reader(test_in, delimiter=',')\n\tfor row in readtest_in:\n\t\tactivation=0\n\t\tfor i in range(ntest[1]):\n\t\t\tif readtest_in.line_num==testdigitsoflines[1][i]:\n\t\t\t\tfor j in range(256):\n\t\t\t\t\tactivation+=float(row[j])\n\t\t\t\tif activation<boundary:\n\t\t\t\t\ttestpredictionactivation.append([readtest_in.line_num,1])\n\t\t\t\telse:\n\t\t\t\t\ttestpredictionactivation.append([readtest_in.line_num,8])\n\t\t\n\t\tfor i in range(ntest[8]):\n\t\t\tif readtest_in.line_num==testdigitsoflines[8][i]:\n\t\t\t\tfor j in range(256):\n\t\t\t\t\tactivation+=float(row[j])\n\t\t\t\tif activation<boundary:\n\t\t\t\t\ttestpredictionactivation.append([readtest_in.line_num,1])\n\t\t\t\telse:\n\t\t\t\t\ttestpredictionactivation.append([readtest_in.line_num,8])\n\ntesttrue=[]\ntestcounteractivation1=0\ntestcounteractivation8=0\n\n#Compare predicted digits with real digits and determine accuracy\nfor i in range(len(testpredictionactivation)):\n\tif testpredictionactivation[i][1]==1:\n\t\tfor j in range(len(testdigitsoflines[1])):\n\t\t\tif testpredictionactivation[i][0]==testdigitsoflines[1][j]:\n\t\t\t\ttestcounteractivation1+=1\n\telif testpredictionactivation[i][1]==8:\n\t\tfor j in range(len(testdigitsoflines[8])):\n\t\t\tif testpredictionactivation[i][0]==testdigitsoflines[8][j]:\n\t\t\t\ttestcounteractivation8+=1\n\ntestaccuracy1=testcounteractivation1/ntest[1]\ntestaccuracy8=testcounteractivation8/ntest[8]\ntestaccuracy=(testcounteractivation1+testcounteractivation8)/(ntest[1]+ntest[8])\n\nprint(\"The test accuracy for correctly classifying the digit 1 is \"+str(testaccuracy1))\nprint(\"The test accuracy for correctly classifying the digit 8 is \"+str(testaccuracy8))\nprint(\"The test accuracy for correctly classifying the digits 1 and 8 is \"+str(testaccuracy))\n"
},
{
"alpha_fraction": 0.7461044788360596,
"alphanum_fraction": 0.758151113986969,
"avg_line_length": 44.01807403564453,
"blob_id": "8c7b7d169b07816d0007734d1c8c8b47e2c174d3",
"content_id": "45946b23061dbd40f6fd33ab879e60986d0d171e",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 7637,
"license_type": "no_license",
"max_line_length": 132,
"num_lines": 166,
"path": "/Assignment 1/Final/Assignment-1-2-final.py",
"repo_name": "spacer730/Neural-Networks",
"src_encoding": "UTF-8",
"text": "import numpy as np\r\nimport numpy.ma as ma\r\nfrom sklearn.metrics import confusion_matrix\r\nimport sklearn\r\nimport matplotlib.pyplot as plt\r\nimport csv\r\n\r\n# List for the ten digits, each containing list of the 256 values of each image\r\ndigitsoflines=[[] for i in range(10)]\r\n# Array for calculating the centers of all 10 digits\r\nSum=[np.zeros(256) for i in range(10)] #Store sum of all training sets for each digit\r\n# Array for coordinates of the center of each digit\r\nc=[np.zeros(256) for i in range(10)]\r\n# Radii of all digits in phase space\r\nr=np.zeros(10)\r\n# Number of images depicting digits in training data\r\nn=np.zeros(10) \r\n\r\n# Read in data from training set\r\nwith open('data/train_out.csv') as train_out:\r\n\treadtrain_out = csv.reader(train_out, delimiter=',')\r\n\tfor row in readtrain_out:\r\n\t\tfor i in range(10):\r\n\t\t\tif int(row[0])==i:\r\n\t\t\t\tdigitsoflines[i].append(readtrain_out.line_num) \r\n\r\n# Compute number of training points for each digit\r\nn=[len(digitsoflines[i]) for i in range(10)]\r\n\r\n# Calculate array \"Sum\" to get coordinates of center for each digit\r\nwith open('data/train_in.csv') as train_in:\r\n\treadtrain_in = csv.reader(train_in, delimiter=',')\r\n\tfor row in readtrain_in:\r\n\t\tfor i in range(10): #Find out which digit the row is\r\n\t\t\tfor j in range(n[i]):\r\n\t\t\t\tif readtrain_in.line_num==digitsoflines[i][j]:\r\n\t\t\t\t\tSum[i]=Sum[i]+np.array(list(map(float, row))) #Converts the string list row in to int list and adds to sum\r\n\r\n# Compute the center of each digit\r\nfor i in range(10):\r\n\tc[i]=Sum[i]/n[i]\r\n\r\n# List of closest distances to another center for all images\r\ntrainpredictioneuclid=[]\r\ntrainpredictionmanhattan=[]\r\ntrainpredictioncosine=[]\r\ntrainpredictioncorrelation=[]\r\n\r\n# Compute distance between centers and data point for all images\r\nwith open('data/train_in.csv') as train_in: \r\n\treadtrain_in = csv.reader(train_in, delimiter=',')\r\n\tfor row in readtrain_in:\r\n\t\tdisteuclid=[]\r\n\t\tdistmanhattan=[]\r\n\t\tdistcosine=[]\r\n\t\tdistcorrelation=[]\r\n\t\t# Calculate distances to all centers using varius metrics\r\n\t\tfor i in range(10):\r\n\t\t\tdisteuclid.append(np.dot(c[i]-np.array(list(map(float,row))),c[i]-np.array(list(map(float,row)))))\r\n\t\t\tdistmanhattan.append(sklearn.metrics.pairwise.pairwise_distances([c[i]],[np.array(list(map(float,row)))],metric='manhattan'))\r\n\t\t\tdistcosine.append(sklearn.metrics.pairwise.pairwise_distances([c[i]],[np.array(list(map(float,row)))],metric='cosine'))\r\n\t\t\tdistcorrelation.append(sklearn.metrics.pairwise.pairwise_distances([c[i]],[np.array(list(map(float,row)))],metric='correlation'))\r\n\t\t\t\r\n\t\ttrainpredictioneuclid.append(np.argmin(disteuclid))\r\n\t\ttrainpredictionmanhattan.append(np.argmin(distmanhattan))\r\n\t\ttrainpredictioncosine.append(np.argmin(distcosine))\r\n\t\ttrainpredictioncorrelation.append(np.argmin(distcorrelation))\r\n\r\n# Counter for success rates for each metric\r\ntraintrue=[]\r\ntraincountereuclid=0\r\ntraincountermanhattan=0\r\ntraincountercosine=0\r\ntraincountercorrelation=0\r\n\r\n# Calculate success rates for each metric\r\nwith open('data/train_out.csv') as train_out:\r\n\treadtrain_out = csv.reader(train_out, delimiter=',')\r\n\tfor row in readtrain_out:\r\n\t\ttraintrue.append(int(row[0]))\r\n\t\tif int(row[0])==trainpredictioneuclid[readtrain_out.line_num-1]:\r\n\t\t\ttraincountereuclid+=1\r\n\t\tif int(row[0])==trainpredictionmanhattan[readtrain_out.line_num-1]:\r\n\t\t\ttraincountermanhattan+=1\r\n\t\tif int(row[0])==trainpredictioncosine[readtrain_out.line_num-1]:\r\n\t\t\ttraincountercosine+=1\r\n\t\tif int(row[0])==trainpredictioncorrelation[readtrain_out.line_num-1]:\r\n\t\t\ttraincountercorrelation+=1\r\n\r\nprint(\"Confusion matrix for training set using euclidean distance:\", confusion_matrix(trainpredictioneuclid,traintrue))\r\nprint(\"Confusion matrix for training set using manhattan distance:\", confusion_matrix(trainpredictionmanhattan,traintrue))\r\nprint(\"Confusion matrix for training set using cosine distance:\", confusion_matrix(trainpredictioncosine,traintrue))\r\nprint(\"Confusion matrix for training set using correlation distance:\", confusion_matrix(trainpredictioncorrelation,traintrue))\r\n\r\nprint(\"The accuracy of the euclidean distance algorithm on the training set is:\")\r\nprint(100*traincountereuclid/len(trainpredictioneuclid))\r\nprint(\"The accuracy of the manhattan distance algorithm on the training set is:\")\r\nprint(100*traincountermanhattan/len(trainpredictionmanhattan))\r\nprint(\"The accuracy of the cosine distance algorithm on the training set is:\")\r\nprint(100*traincountercosine/len(trainpredictioncosine))\r\nprint(\"The accuracy of the correlation distance algorithm on the training set is:\")\r\nprint(100*traincountercorrelation/len(trainpredictioncorrelation))\r\n\r\n# List of closest distances to another center for al images\r\ntestpredictioneuclid=[]\r\ntestpredictionmanhattan=[]\r\ntestpredictioncosine=[]\r\ntestpredictioncorrelation=[]\r\n\r\n# Compute distance of data point to all centers and predict the digit\r\nwith open('data/test_in.csv') as test_in:\r\n\treadtest_in = csv.reader(test_in, delimiter=',')\r\n\tfor row in readtest_in:\r\n\t\tdisteuclid=[]\r\n\t\tdistmanhattan=[]\r\n\t\tdistcosine=[]\r\n\t\tdistcorrelation=[]\r\n\t\tfor i in range(10): # Find out which digit the row is\r\n\t\t\tdisteuclid.append(np.dot(c[i]-np.array(list(map(float,row))),c[i]-np.array(list(map(float,row)))))\r\n\t\t\tdistmanhattan.append(sklearn.metrics.pairwise.pairwise_distances([c[i]],[np.array(list(map(float,row)))],metric='manhattan'))\r\n\t\t\tdistcosine.append(sklearn.metrics.pairwise.pairwise_distances([c[i]],[np.array(list(map(float,row)))],metric='cosine'))\r\n\t\t\tdistcorrelation.append(sklearn.metrics.pairwise.pairwise_distances([c[i]],[np.array(list(map(float,row)))],metric='correlation'))\r\n\t\t\t\r\n\t\ttestpredictioneuclid.append(np.argmin(disteuclid))\r\n\t\ttestpredictionmanhattan.append(np.argmin(distmanhattan))\r\n\t\ttestpredictioncosine.append(np.argmin(distcosine))\r\n\t\ttestpredictioncorrelation.append(np.argmin(distcorrelation))\r\n\r\n# Counters for success rates\r\ntesttrue=[]\r\ntestcountereuclid=0\r\ntestcountermanhattan=0\r\ntestcountercosine=0\r\ntestcountercorrelation=0\r\n\r\n# Go through test data and calculate the success rates of our predictions\r\nwith open('data/test_out.csv') as test_out:\r\n\treadtest_out = csv.reader(test_out, delimiter=',')\r\n\tfor row in readtest_out:\r\n\t\ttesttrue.append(int(row[0]))\r\n\t\tif int(row[0])==testpredictioneuclid[readtest_out.line_num-1]:\r\n\t\t\ttestcountereuclid+=1\r\n\t\tif int(row[0])==testpredictionmanhattan[readtest_out.line_num-1]:\r\n\t\t\ttestcountermanhattan+=1\r\n\t\tif int(row[0])==testpredictioncosine[readtest_out.line_num-1]:\r\n\t\t\ttestcountercosine+=1\r\n\t\tif int(row[0])==testpredictioncorrelation[readtest_out.line_num-1]:\r\n\t\t\ttestcountercorrelation+=1\r\n\r\nprint(\"Confusion matrix for test set using euclidean distance:\")\r\nprint(confusion_matrix(testpredictioneuclid,testtrue))\r\nprint(\"Confusion matrix for test set using manhattan distance:\")\r\nprint(confusion_matrix(testpredictionmanhattan,testtrue))\r\nprint(\"Confusion matrix for test set using cosine distance:\")\r\nprint(confusion_matrix(testpredictioncosine,testtrue))\r\nprint(\"Confusion matrix for test set using correlation distance:\")\r\nprint(confusion_matrix(testpredictioncorrelation,testtrue))\r\n\r\nprint(\"The accuracy of the euclidean distance algorithm on the test set is:\")\r\nprint(100*testcountereuclid/len(testpredictioneuclid))\r\nprint(\"The accuracy of the manhattan distance algorithm on the test set is:\")\r\nprint(100*testcountermanhattan/len(testpredictionmanhattan))\r\nprint(\"The accuracy of the cosine distance algorithm on the test set is:\")\r\nprint(100*testcountercosine/len(testpredictioncosine))\r\nprint(\"The accuracy of the correlation distance algorithm on the test set is:\")\r\nprint(100*testcountercorrelation/len(testpredictioncorrelation))"
},
{
"alpha_fraction": 0.661512017250061,
"alphanum_fraction": 0.6958763003349304,
"avg_line_length": 28.63157844543457,
"blob_id": "e4d681e62080035e83e5172507521cd8042b3b33",
"content_id": "41e4c14d6aa51cc8cef970c679eefe09f466b621",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 582,
"license_type": "no_license",
"max_line_length": 104,
"num_lines": 19,
"path": "/Assignment 3/Roses/mission_control.py",
"repo_name": "spacer730/Neural-Networks",
"src_encoding": "UTF-8",
"text": "\"\"\"\r\nContains all the variables necessary to run gans_n_roses.py file.\r\n\"\"\"\r\n\r\n# Set LOAD to True to load a trained model or set it False to train a new one.\r\nLOAD = False\r\n\r\n# Dataset directories\r\nDATASET_PATH = './Dataset/Roses/'\r\nDATASET_CHOSEN = 'roses' # required by utils.py -> ['birds', 'flowers', 'black_birds']\r\n\r\n\r\n# Model hyperparameters\r\nZ_DIM = 100 # The input noise vector dimension\r\nBATCH_SIZE = 12\r\nN_ITERATIONS = 3000\r\nLEARNING_RATE = 0.0002\r\nBETA_1 = 0.5\r\nIMAGE_SIZE = 128 # Change the Generator model if the IMAGE_SIZE needs to be changed to a different value\r\n"
},
{
"alpha_fraction": 0.6763284802436829,
"alphanum_fraction": 0.7085345983505249,
"avg_line_length": 29.049999237060547,
"blob_id": "83f06deacf98b3af159677fa50813e647a355b71",
"content_id": "d6105dad1b6c494123babe10a09a378ba679b98b",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 621,
"license_type": "no_license",
"max_line_length": 103,
"num_lines": 20,
"path": "/Assignment 3/Code/mission_control.py",
"repo_name": "spacer730/Neural-Networks",
"src_encoding": "UTF-8",
"text": "\"\"\"\r\nContains all the variables necessary to run main.py file.\r\n\"\"\"\r\n\r\n# Set LOAD to True to load a trained model or set it False to train a new one.\r\nLOAD = False\r\n\r\n# Dataset directories. Use the second line for Hubble training data.\r\nDATASET_PATH = './Dataset/SDSS/'\r\n#DATASET_PATH = './Dataset/Hubble/'\r\nDATASET_CHOSEN = 'galaxies' # required by utils.py\r\n\r\n\r\n# Model hyperparameters\r\nZ_DIM = 100 # The input noise vector dimension\r\nBATCH_SIZE = 12\r\nN_ITERATIONS = 30000\r\nLEARNING_RATE = 0.0002\r\nBETA_1 = 0.5\r\nIMAGE_SIZE = 64 # Change the Generator model if the IMAGE_SIZE needs to be changed to a different value\r\n"
},
{
"alpha_fraction": 0.6028833389282227,
"alphanum_fraction": 0.6828309297561646,
"avg_line_length": 41.38888931274414,
"blob_id": "0ae0ef73ca3503398228e452ce65fda07dcb02c3",
"content_id": "fa06381299f17a29a1a3f9e002d5c010b997c61f",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 775,
"license_type": "no_license",
"max_line_length": 268,
"num_lines": 18,
"path": "/Assignment 0/Assignment-0_1_2.py",
"repo_name": "spacer730/Neural-Networks",
"src_encoding": "UTF-8",
"text": "import numpy as np\nimport matplotlib.pyplot as plt\n\nw=np.random.randn(2,1)\nb=np.random.randn()\n\nx_1=[0,0,1,1]\nx_2=[0,1,0,1]\nX=np.matrix([x_1,x_2])\n\ny=np.transpose(w)*X+np.matrix([[b,b,b,b]])\n\ny_hat=np.heaviside(y,0)\n\nprint(y_hat)\n#Can not make XOR function, because we need w1+b>1 and w2+b>1 thus w1+w2+2b>2. However we also need w1+w2+b≤0, this means b>2. However if b>2 then from w1+b>1 and w2+b>1 it follows that w1≥-1 and w2≥-1. Thus w1+w2+b>0. Contradiction. Thus we can not make XOR function.\n\n#To get XOR function we need H(b)=0, H(w1+b)=1, H(w2+b)=1 and H(w1+w2+b)=0. This means we need b≤0 and w1+b>0 and w2+b>0 thus w1+w2+2b>0. However we also need w1+w2+b≤0, this means b>0. This is in contradiction with b≤0, so we can not make XOR function this way.\n"
},
{
"alpha_fraction": 0.6545268297195435,
"alphanum_fraction": 0.7035137414932251,
"avg_line_length": 33.20175552368164,
"blob_id": "9cc409a76ba926ae048a0871d826d2711f87a973",
"content_id": "487da6b6b5e03d8c1d555160122327bbe505c3e7",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 3899,
"license_type": "no_license",
"max_line_length": 226,
"num_lines": 114,
"path": "/Assignment 1/Assignment-1_5-xor_net.py",
"repo_name": "spacer730/Neural-Networks",
"src_encoding": "UTF-8",
"text": "import numpy as np\nimport numpy.ma as ma\nimport matplotlib.pyplot as plt\nimport csv\n\n#Define all the activation functions\ndef sigmoid(x):\n\treturn 1/(1+np.exp(-x))\n\ndef relu(x):\n\treturn np.maximum(0,x)\n\n#Function that simulates the output of the Neural network for input nodes + weights. If one wants to use another activation function, remove the hashtag # in front of the code and put it in front of the code one wants to omit.\ndef xor_net(x1,x2,weights):\n\ty=np.dot(np.array([weights[0],weights[1]]),np.array([1,x1,x2]))\n\n\ty=list(map(lambda y: sigmoid(y), y))\n\tz=sigmoid(np.dot(np.array(weights[2]),np.array([1,y[0],y[1]])))\n\n\t#y=list(map(lambda y: np.tanh(y), y))\n\t#z=np.tanh(np.dot(np.array(weights[2]),np.array([1,y[0],y[1]])))\n\n\t#y=list(map(lambda y: relu(y), y))\n\t#z=relu(np.dot(np.array(weights[2]),np.array([1,y[0],y[1]])))\n\n\treturn z\n\n#Function for the mean squared error of the network for a given set of weights\ndef mse(weights):\n\tmse00=(0-xor_net(0,0,weights))**2\n\tmse01=(1-xor_net(0,1,weights))**2\n\tmse10=(1-xor_net(1,0,weights))**2\n\tmse11=(0-xor_net(1,1,weights))**2\n\tmse=mse00+mse01+mse10+mse11\n\treturn mse\n\n#Function that counts the missclassified XOR inputs for a given set of weights\ndef missclassified(weights):\n\tmissclassified=0\n\tif xor_net(0,0,weights)>0.5:\n\t\tmissclassified+=1\n\tif xor_net(0,1,weights)<=0.5:\n\t\tmissclassified+=1\n\tif xor_net(1,0,weights)<=0.5:\n\t\tmissclassified+=1\n\tif xor_net(1,1,weights)>0.5:\n\t\tmissclassified+=1\n\treturn missclassified\n\n#Function that calculates the gradient of the MSE for a given set of weights.\t\ndef grdmse(weights):\n\teps=0.001\n\tnumrows=weights.shape[0]\n\tnumcols=weights.shape[1]\n\tgrdmse=np.zeros((3,3))\n\tfor i in range(numrows):\n\t\tfor j in range(numcols):\n\t\t\ta=np.zeros((3,3))\n\t\t\ta[i][j]=eps\n\t\t\tgrdmse[i][j]=(mse(weights+a)-mse(weights))/eps\n\treturn grdmse\n\n#Algorithm to train the network for 4000 iterations. Weights are initialized by either drawing them from\n#a normal distribution with variance 1 and mean 0 or a uniform distribution from -1 to 1.\ndef trainnetwork(learningrate,IN):\n\tnp.random.seed(42)\n\tif IN == 'normal':\n\t\tweights=np.random.randn(3,3)\n\tif IN == 'uniform':\n\t\tweights=np.random.uniform(-1,1,9).reshape(3,3)\n\tcounter=0\n\tmselist=[]\n\tmissclassifiedlist=[]\n\twhile counter < 4000:\n\t\tweights=weights-learningrate*grdmse(weights)\n\t\tmselist.append(mse(weights))\n\t\tmissclassifiedlist.append(missclassified(weights))\n\t\tcounter+=1\n\treturn weights, mselist, missclassifiedlist\n\n\n#Get the intermediate results for the MSE and the # of misclassified units during the training for different learning rates and weight initializations\nweights, mselist, missclassifiedlist = np.full((2,3,3,3), 0),np.zeros((2,3,4000)),np.zeros((2,3,4000))\nlearningrate = [0.1,0.25,0.5]\nIN = ['normal','uniform']\nfor i in range (2):\n\tfor j in range(3):\n\t\tweights[i][j], mselist[i][j], missclassifiedlist[i][j] = trainnetwork(learningrate[j],IN[i])\n\n#Make a plot for the progress of the MSE and the # of misclassified units during the training for different learning rates and weight initializations\nfig=plt.figure()\nfor i in range(2):\n\tfor j in range(3):\n\t\t\tax=fig.add_subplot(2,3,3*i+j+1, label=\"1\")\n\t\t\tax2=fig.add_subplot(2,3,3*i+j+1, label=\"2\", frame_on=False)\n\n\t\t\tax.plot(range(len(mselist[i][j])), mselist[i][j], color=\"C0\")\n\t\t\tax.set_xticks([0,2000,4000])\n\t\t\tax.set_xlabel(\"Iterations\", color=\"k\")\n\t\t\tax.set_ylabel(\"MSE\", color=\"C0\")\n\t\t\tax.set_ylim([0,1])\n\t\t\tax.tick_params(axis='y', colors=\"C0\")\n\n\t\t\tax2.plot(range(len(missclassifiedlist[i][j])), missclassifiedlist[i][j], color=\"C1\")\n\t\t\tplt.text(1500, 2.8, r'$\\eta=$'+str(learningrate[j]))\n\t\t\tax2.set_xticks([0,2000,4000])\n\t\t\tax2.yaxis.tick_right()\n\t\t\tax2.set_ylabel('# missclassified units', color=\"C1\")\n\t\t\tax2.set_ylim([0,4])\n\t\t\tax2.yaxis.set_label_position('right')\n\t\t\tax2.tick_params(axis='y', colors=\"C1\")\n\nplt.subplots_adjust(hspace=0.3, wspace=1)\nplt.savefig('Activationfunction sigmoid')\n"
}
] | 18 |
KyleDeng/Kconfiglib
|
https://github.com/KyleDeng/Kconfiglib
|
77a5f5d417a63227872ae431f7de45de0afd05b3
|
a51994afa76f9f604da920d2a4f44b470103ff09
|
bee85d52382efce581c99003335b5e4f465363a1
|
refs/heads/master
| 2022-12-14T02:20:44.989292 | 2020-09-14T03:47:17 | 2020-09-14T03:47:17 | 290,096,832 | 0 | 0 |
ISC
| 2020-08-25T02:55:31 | 2020-08-24T08:13:22 | 2020-05-22T12:49:17 | null |
[
{
"alpha_fraction": 0.48230087757110596,
"alphanum_fraction": 0.4988937973976135,
"avg_line_length": 25.617647171020508,
"blob_id": "a15454121f8233ef318128b86bde82d3080d8e01",
"content_id": "383a862d5b5cf4c3c43d2e71c741530307696225",
"detected_licenses": [
"LicenseRef-scancode-unknown-license-reference",
"ISC"
],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 904,
"license_type": "permissive",
"max_line_length": 92,
"num_lines": 34,
"path": "/conf2h.py",
"repo_name": "KyleDeng/Kconfiglib",
"src_encoding": "UTF-8",
"text": "# -*- coding:utf-8 -*-\nimport sys\nimport os\n\n\ndef conf2h(conf, header):\n header_f = open(header, 'w')\n conf_f = open(conf, 'r')\n conf_lines = conf_f.readlines()\n for l in conf_lines:\n if l.startswith(\"CONFIG_\"):\n ori_key = l.split('=', 1)[0]\n ori_value = l.split('=', 1)[1]\n\n def_head = \"#define \"\n def_key = ori_key.replace(\"CONFIG_\", '', 1) + ' '\n def_value = ori_value if ori_value[0] != 'y' else ori_value.replace('y', '1', 1)\n\n header_f.write(def_head + def_key + def_value)\n elif l.startswith(\"#\"):\n header_f.write(l.replace('#', \"//\", 1))\n else:\n header_f.write(l)\n header_f.close()\n conf_f.close()\n\n\nif __name__ == \"__main__\":\n conf_file=sys.argv[1]\n h_file=sys.argv[2]\n if os.path.exists(conf_file):\n conf2h(conf_file, h_file)\n else:\n exit(1)"
}
] | 1 |
Gwydion-Smit/study-focus
|
https://github.com/Gwydion-Smit/study-focus
|
7fd6052d08a2fee3573e774f297daef31ac53083
|
6964639793a6cfde49e56c7c0b861813804ceaed
|
90f9844af7d60a974e5c535c0868b76b5387aae5
|
refs/heads/main
| 2023-03-12T20:03:33.180089 | 2021-02-28T12:24:15 | 2021-02-28T12:24:15 | 343,098,842 | 0 | 0 |
MIT
| 2021-02-28T12:23:14 | 2020-11-29T22:03:21 | 2020-11-29T21:57:29 | null |
[
{
"alpha_fraction": 0.5785058736801147,
"alphanum_fraction": 0.5975098013877869,
"avg_line_length": 28.688716888427734,
"blob_id": "26af62fc04e65cfb0adf0ec1cf743c4850c67f20",
"content_id": "f066562707b273cce0139dd156405167a46d05b5",
"detected_licenses": [
"MIT"
],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 7630,
"license_type": "permissive",
"max_line_length": 172,
"num_lines": 257,
"path": "/study_focus.py",
"repo_name": "Gwydion-Smit/study-focus",
"src_encoding": "UTF-8",
"text": "import cv2\nfrom gaze_tracking import GazeTracking\nfrom tkinter import *\nfrom time import sleep\nfrom datetime import datetime, timedelta\nfrom focus_callabirate import Callibrate\nfrom platform import system\nfrom subprocess import check_output\nfrom pathlib import Path\nimport os\nimport pygame\nimport json\nfrom configparser import ConfigParser\n\nsystem_type = system()\n\nconfig = ConfigParser()\n\n\nwith open('config.json', 'r') as f:\n config = json.load(f)\n\nblacklist = config[\"blacklist\"]\nBREAKTIME = config['BREAKTIME']\nWORKTIME = config['WORKTIME']\nFLASHTIME = config['FLASHTIME']\n\nif system_type == \"Windows\":\n import wmi\n from subprocess import STARTUPINFO, STARTF_USESHOWWINDOW\n manager = wmi.WMI()\nelif system_type == \"Linux\":\n import psutil\nelse:\n exit(\"Not a Usable Platform\")\n\n\ndef get_processes():\n names = []\n if system_type == \"Windows\":\n for process in manager.Win32_Process():\n names.append(process.Name)\n elif system_type == \"Linux\":\n for process in psutil.process_iter():\n names.append(process.name().split(\"/\")[0])\n names = list(set(names))\n return names\n\n\ndef get_programs():\n names = []\n if system_type == \"Windows\":\n links = list(Path(\"C:/ProgramData/Microsoft/Windows/Start Menu\").rglob(\"*.lnk\"))\n links += list(Path(\"C:/Users/\" + os.getenv(\"USERNAME\") + \"/AppData/Roaming/Microsoft/Windows/Start Menu/Programs\").rglob(\"*.lnk\"))\n links = [str(link) for link in links]\n startupinfo = STARTUPINFO()\n startupinfo.dwFlags |= STARTF_USESHOWWINDOW\n for link in links:\n if not re.search(\",\", link) and not re.search(\"\\(\", link):\n double_slashed = link.replace(\"\\\\\", \"\\\\\\\\\")\n cargs = [\"wmic\", \"path\", \"win32_shortcutfile\", \"where\", \"name=\\\"{0}\\\"\".format(double_slashed), \"get\", \"target\", \"/value\"]\n filename = list(filter(None, check_output(cargs, startupinfo=startupinfo).splitlines()))[0]\n if filename != b\"Target=\":\n names.append(filename.split(b\"\\\\\")[-1].decode(\"utf-8\"))\n elif system_type == \"Linux\":\n for program in check_output([\"/bin/bash\", \"-c\", \"compgen -c\"]).splitlines():\n temp = program.decode('utf-8')\n if re.search(\"[a-zA-Z]\", temp):\n names.append(temp)\n names = list(set(names))\n names.sort()\n return names\n\n\ncallabirator = Callibrate()\ncallabirator.calibrate()\nscreen_size = callabirator.get_screen_size()\ncallabirator.close_camera()\n\ngaze = GazeTracking()\nwebcam = cv2.VideoCapture(0)\n\nroot = Tk()\nroot.configure()\nroot.update_idletasks()\n\nwindow2 = Toplevel(root)\nwindow2.attributes('-fullscreen', True)\nwindow2.update_idletasks()\nw = window2.winfo_screenwidth()\nh = window2.winfo_screenheight()\n\n\ncanvas2 = Canvas(window2, bg='black', width=w, height=h)\ncanvas2.pack()\nwindow2.withdraw()\n\nsuccess = canvas2.create_text(w/2, h/2, fill=\"White\", text=\"YOU ARE NOT LOOKING AT THE SCREEN\")\n\nroot.bind(\"<Escape>\", lambda e: e.widget.quit())\n\nbad_count = 0\nbad_process = False\n\n\nprocess_name = \"\"\n\nflash = False\nflash_on = datetime.now()\nflash_off = datetime.now()\nchecking = 1\nprocesses = []\n\npygame.init()\npygame.mixer.init()\n\ncurrent = \"\"\nworking = True\n\nstart_time = datetime.now()\n\ncanvas3 = Canvas(root, bg='black', width=300, height=70)\ncanvas3.pack()\n\ndisplay_timer = canvas3.create_text(10, 5, fill=\"green\", anchor=NW)\n\nnotification =canvas3.create_text(10,35,fill=\"red\", anchor=NW)\n\n\ndef timer():\n global current\n global working\n global start_time\n global timer\n global canvas3\n global root\n\n if working:\n current = str(start_time+timedelta(minutes=WORKTIME) - datetime.now())\n\n else:\n current = str(start_time+timedelta(minutes=BREAKTIME) - datetime.now())\n\n current = current.split(\".\")[0]\n\n if current == \"0:00:15\":\n if working:\n canvas3.itemconfigure(notification, text=\"You need to take a break soon!\")\n else:\n canvas3.itemconfigure(notification, text=\"You need to get back to work soon!\")\n\n if current == \"0:00:00\" or \"-\" in current:\n window2.withdraw()\n working = not working\n start_time = datetime.now()\n canvas3.itemconfigure(notification, text=\"\")\n canvas3.itemconfigure(display_timer, text=current)\n root.after(1000, timer)\n\ndef main():\n global flash\n global flash_on\n global flash_off\n global checking\n global blacklist\n global success\n global bad_count\n global bad_process\n global process_name\n global processes\n\n _, frame = webcam.read()\n gaze.refresh(frame)\n frame = gaze.annotated_frame()\n h_ratio = gaze.horizontal_ratio()\n v_ratio = gaze.vertical_ratio()\n\n if working:\n checking -= 1\n if not checking:\n checking = 5\n processes = get_processes()\n for item in blacklist:\n if item in processes:\n process_name = item\n bad_process = True\n break\n\n if bad_process and not flash:\n if (datetime.now() - flash_off).total_seconds() > 3.0:\n flash_on = datetime.now()\n flash = True\n\n\n if h_ratio and v_ratio:\n if (1.2*h_ratio < screen_size[0] or h_ratio > 1.2*screen_size[2]) or (1.2*v_ratio < screen_size[1] or v_ratio > 1.2*screen_size[3]) and not gaze.is_blinking():\n bad_count = bad_count + 1\n if bad_count > 5:\n window2.deiconify()\n canvas2.itemconfigure(success, text=\"YOU ARE NOT LOOKING AT THE SCREEN\")\n\n\n elif (1.2*h_ratio > screen_size[0] or h_ratio < 1.2*screen_size[2]) or (1.2*v_ratio > screen_size[1] or v_ratio < 1.2*screen_size[3]):\n bad_count = 0\n window2.withdraw()\n\n elif gaze.is_blinking():\n pass\n\n else:\n bad_count = bad_count + 1\n if bad_count > 10:\n window2.deiconify()\n canvas2.itemconfigure(success, text=\"YOU ARE NOT LOOKING AT THE SCREEN\")\n bark = pygame.mixer.Sound('bark.wav')\n bark.play()\n\n\n if bad_process and flash:\n window2.deiconify()\n canvas2.itemconfigure(success, text=\"YOU ARE DOING SOMETHING NAUGHTY. CLOSE %s\" % (process_name))\n if (datetime.now() - flash_on).total_seconds() > 3.0:\n window2.withdraw()\n flash = False\n flash_off = datetime.now()\n bad_process = False\n\n else:\n if h_ratio and v_ratio:\n if (1.2*h_ratio > screen_size[0] or h_ratio < 1.2*screen_size[2]) or (1.2*v_ratio > screen_size[1] or v_ratio < 1.2*screen_size[3]) and not gaze.is_blinking():\n bad_count = bad_count + 1\n if bad_count > 6:\n\n window2.deiconify()\n canvas2.itemconfigure(success, text=\"YOU ARE LOOKING AT THE SCREEN. TAKE A BREAK\")\n bark = pygame.mixer.Sound('bark.wav')\n bark.play()\n\n\n elif (h_ratio < screen_size[0] or h_ratio > 1.2*screen_size[2]) or (1.2*v_ratio < screen_size[1] or v_ratio > 1.2*screen_size[3]) and not gaze.is_blinking():\n bad_count = 0\n window2.withdraw()\n else:\n bad_count = bad_count + 1\n if bad_count > 6:\n window2.deiconify()\n canvas2.itemconfigure(success, text=\"YOU ARE LOOKING AT THE SCREEN. TAKE A BREAK\")\n\n\n root.after(500, main)\n\nroot.after(5,timer())\nroot.after(5,main())\n\nroot.mainloop()\n\n#test\n"
},
{
"alpha_fraction": 0.7333333492279053,
"alphanum_fraction": 0.7512820363044739,
"avg_line_length": 44.882354736328125,
"blob_id": "2cd91940439d01a706c4338410893f0aa618d21a",
"content_id": "6c7fc21919c23344b61944e297486b69955c24af",
"detected_licenses": [
"MIT"
],
"is_generated": false,
"is_vendor": false,
"language": "Markdown",
"length_bytes": 780,
"license_type": "permissive",
"max_line_length": 172,
"num_lines": 17,
"path": "/README.md",
"repo_name": "Gwydion-Smit/study-focus",
"src_encoding": "UTF-8",
"text": "# Setup\n1. Clone this repo.\n2. Navigate to the cloned repo.\n3. Ensure you are using Python version 3.8.\n4. On Windows, follow these steps:\n\n - ``pip install cmake``\n - Install Visual Studio build tools from here.\n - In Visual Studio 2017 go to the Individual Components tab, Visual C++ Tools for Cmake, and check the checkbox under the \"Compilers, build tools and runtimes\" section.\n - ``pip install numpy opencv_python dlib``\n5. In your console, type ``python example.py``.\n\n# Known issues\n1. If your camera hangs between instances of running study-focus this will cause cv2 to error. Make sure your camera is detached from the previous study-focus instance\n2. Don't click Enter too many times or too quickly\n\nCreated for Manchester GreatUniHack with faridz and zoya\n"
},
{
"alpha_fraction": 0.5499455332756042,
"alphanum_fraction": 0.5931710600852966,
"avg_line_length": 38.884056091308594,
"blob_id": "be394e0e088e839b2e4edd6850fab30331510681",
"content_id": "717d2093d516276a40ddcffad122de89152dd1a3",
"detected_licenses": [
"MIT"
],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 2753,
"license_type": "permissive",
"max_line_length": 134,
"num_lines": 69,
"path": "/focus_callabirate.py",
"repo_name": "Gwydion-Smit/study-focus",
"src_encoding": "UTF-8",
"text": "\nimport cv2\nfrom gaze_tracking import GazeTracking\nfrom tkinter import *\n\nclass Callibrate(object):\n def __init__(self):\n self.gaze = GazeTracking()\n self.webcam = cv2.VideoCapture(0)\n self.hori = []\n self.verti = []\n self.circle = []\n self.i = 0\n self.window1 = Tk()\n pass\n\n def close_camera(self):\n self.webcam.release()\n\n def get_screen_size(self):\n self.hori = list(filter(None, self.hori))\n self.verti = list(filter(None, self.verti))\n return [min(self.hori), min(self.verti), max(self.hori), max(self.verti)]\n\n\n def calibrate(self):\n self.window1.attributes('-fullscreen', True)\n self.window1.update_idletasks()\n self.w = self.window1.winfo_screenwidth()\n self.h = self.window1.winfo_screenheight()\n self.coords = [(10, 10, 30, 30),\n (self.w-10, self.h-10, self.w-30, self.h-30),\n (self.w/2+10, 10, self.w/2-10, 30),\n (self.w/2+10, self.h-10, self.w/2-10, self.h-30),\n (10, self.h-10, 30, self.h-30),\n (self.w-10, 10, self.w-30, 30),\n (10, self.h/2-10, 30, self.h/2+10),\n (self.w-10, self.h/2-10, self.w-30, self.h/2+10)]\n self.canvas = Canvas(self.window1, bg='black', width=self.w, height=self.h)\n self.canvas.pack()\n self.display = self.canvas.create_text(self.w/2, self.h/2, fill=\"white\", text=\"Press enter to start. Look at the white ball.\")\n self._initialise_balls()\n self.window1.bind(\"<Escape>\", lambda e: e.widget.quit())\n self.window1.bind(\"<Return>\", self._callabirate)\n self.window1.mainloop()\n\n def _initialise_balls(self):\n for i in range(len(self.coords)):\n self.circle.append(self.canvas.create_oval(0, 0, 0, 0, outline='white', fill='white'))\n\n def _callabirate(self, event = None):\n self.canvas.coords(self.circle[self.i], self.coords[self.i])\n _, frame = self.webcam.read()\n self.window1.after(2000)\n self.gaze.refresh(frame)\n frame = self.gaze.annotated_frame()\n self.hori.append(self.gaze.horizontal_ratio())\n self.verti.append(self.gaze.vertical_ratio())\n self.window1.after(3000, self._success)\n\n def _deleteBall(self):\n self.canvas.move(self.circle[self.i], -10000, -10000)\n\n def _success(self):\n self.canvas.itemconfigure(self.display, text=\"Amazing! Press enter for the next ball please :)\")\n self._deleteBall()\n if self.i == len(self.coords) - 1:\n self.canvas.itemconfigure(self.display, text=\"Finished! We will now start the application.\")\n self.window1.after(1000, lambda: self.window1.destroy())\n self.i = self.i + 1\n"
}
] | 3 |
xxxfzxxx/ForwardLab
|
https://github.com/xxxfzxxx/ForwardLab
|
a4f3bd99999f1899a629596b65f8605a3d3a778b
|
9af30b912867cd3f72f6cf280ab7f97baf3b81e1
|
0788877dbbbfb35bf5ee78b0cd4ddb66aad39711
|
refs/heads/main
| 2023-06-08T12:19:11.662192 | 2021-06-27T04:27:39 | 2021-06-27T04:27:39 | 369,698,834 | 0 | 0 | null | null | null | null | null |
[
{
"alpha_fraction": 0.684684693813324,
"alphanum_fraction": 0.684684693813324,
"avg_line_length": 14.142857551574707,
"blob_id": "59e2446520daf20f03cf2e0e07c10ae2ffbebf94",
"content_id": "77ac46be6756e64a52330f5dbebe14215329c5f2",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 111,
"license_type": "no_license",
"max_line_length": 39,
"num_lines": 7,
"path": "/week3/demo.py",
"repo_name": "xxxfzxxx/ForwardLab",
"src_encoding": "UTF-8",
"text": "from elasticsearch import Elasticsearch\r\n\r\nes = Elasticsearch(\r\n http_auth=(\"elastic\",\"0qe1j5wNTgYe1sMmwPv8aXiw\")\r\n)\r\n\r\nes"
},
{
"alpha_fraction": 0.6399999856948853,
"alphanum_fraction": 0.645714282989502,
"avg_line_length": 19.625,
"blob_id": "c32d5ef18dfbe6482d15ff13d891d49057f4636d",
"content_id": "d9f33947c4d5d140101c5588228998e26cfa1404",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 175,
"license_type": "no_license",
"max_line_length": 45,
"num_lines": 8,
"path": "/week2/demo.py",
"repo_name": "xxxfzxxx/ForwardLab",
"src_encoding": "UTF-8",
"text": "import pandas as pd\r\nimport numpy as np\r\n\r\n\r\ndef load_dataset(path):\r\n df = pd.read_csv(path, index_col=0)\r\n df.fillna('unknown author', inplace=True)\r\n return df\r\n\r\n"
},
{
"alpha_fraction": 0.3968620300292969,
"alphanum_fraction": 0.4014766812324524,
"avg_line_length": 33.95161437988281,
"blob_id": "e1b60769adb7252b019fe78693543fbe656396fd",
"content_id": "1ab7d498ac5b9480a7bf4de1700519bfad9eae3e",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 2313,
"license_type": "no_license",
"max_line_length": 80,
"num_lines": 62,
"path": "/week4/ES-engine-demo/data/query.py",
"repo_name": "xxxfzxxx/ForwardLab",
"src_encoding": "UTF-8",
"text": "import time\nimport json\n# coding: utf-8\nfrom elasticsearch import Elasticsearch\n\ndef do_the_search(query):\n #使用TF-IDF计算相似度\n body = {\n \"query\":{\n \"bool\":{\n \"must\":{\n #文档与query匹配的条件(必须满足):\n \"multi_match\": {\n \"fields\": [ \"title\",\"abstract\"], \n \"query\" : query,\n \"analyzer\":\"snowball\",#query和文档都使用snowball analyzer\n \"fuzziness\": \"AUTO\", #模糊匹配\n \"minimum_should_match\":\"75%\"#指定匹配的term数\n }\n }, \n #如果与query能完全匹配,score更高\n \"should\":[\n {\"match_phrase\":{\n \"title\":{\n \"query\" : query,\n \"slop\":5,#phrase中term距离<=5的score更高 \n \"boost\":2 # 在title中匹配的重要性是paperAbstract中的2倍\n }\n } \n },\n {\"match_phrase\":{\n \"abstract\":{\n \"query\" : query,\n \"slop\":5,#phrase中term距离<=5的score更高 \n }\n }\n }\n ]\n }\n }\n }\n es=Elasticsearch()\n search = es.search(index=\"academic\", doc_type=\"article\",body=body)\n \n return search\n\nif __name__ == '__main__':\n time_start = time.time()\n search=do_the_search(\"pediatric\")\n title = [str(hit['_source']['title']) for hit in search['hits']['hits']]\n scores = [hit['_score'] for hit in search['hits']['hits']]\n authors = [str(hit['_source']['authors']) for hit in search['hits']['hits']]\n hits = [hit for hit in search['hits']['hits']]\n \n for i in range(len(search)):\n print('result',i+1,':','score:',scores[i])\n print('title:',title[i])\n print('authors:',authors[i])\n print(\"hits: \",hits[i])\n print('--------------------------------------------------------------')\n time_end = time.time()\n print(\"total time {} s\".format(time_end-time_start))\n"
},
{
"alpha_fraction": 0.3017769753932953,
"alphanum_fraction": 0.3062193691730499,
"avg_line_length": 33.903743743896484,
"blob_id": "795a67384d95071931a6ba2c8ba09d541515bcdd",
"content_id": "e3bdb391086ea3b33475be54c919b3ebda3e8bcf",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 6620,
"license_type": "no_license",
"max_line_length": 88,
"num_lines": 187,
"path": "/week4/ES-engine-demo/data/load.py",
"repo_name": "xxxfzxxx/ForwardLab",
"src_encoding": "UTF-8",
"text": "from elasticsearch import Elasticsearch\nfrom elasticsearch.helpers import bulk\nfrom elasticsearch.exceptions import TransportError\nimport json\nimport os\nimport time\nstart_time = time.time()\n\nclass ElasticObj:\n def __init__(self, index_name, index_type):\n \"\"\"\n index_name: 索引名称\n index_type: 索引类型\n \"\"\"\n self.index_name = index_name\n self.index_type = index_type\n # 无用户名密码状态\n self.es = Elasticsearch([\"localhost\"], port=9200)\n # 用户名密码状态\n # self.es = Elasticsearch([ip],http_auth=(\"elastic\", \"password\"),port=9200)\n\n def create_index(self):\n # 创建映射\n print(\"creating index...\")\n index_mappings = {\n\n \"mappings\": {\n self.index_type: {\n \"properties\": {\n \"id\": { \n \"type\": \"keyword\",\n \"index\": \"false\"\n },\n \"title\": { \n \"type\": \"text\",\n \"analyzer\": \"snowball\"\n },\n \"authors\": { \n \"type\": \"nested\",\n \"properties\": {\n \"name\": {\"type\": \"text\", \"index\": \"false\"},\n \"org\": {\"type\": \"text\", \"index\": \"false\"},\n \"org_id\": {\"type\": \"text\", \"index\": \"false\"},\n \"id\": {\"type\": \"text\", \"index\": \"false\"}\n }\n },\n \n \"venue\": { \n \"properties\": {\n \"id\" : {\"type\": \"text\", \"index\": \"false\"},\n \"name\": {\"type\": \"text\", \"index\": \"false\"}\n }\n },\n \"year\": { \n \"type\": \"integer\",\n \"index\": \"false\"\n },\n \"keywords\": { \n \"type\": \"text\",\n \"index\": \"false\"\n },\n \"fos\": { \n \"type\": \"nested\",\n \"properties\": {\n \"name\": {\"type\": \"text\", \"index\": \"false\"},\n \"w\": {\"type\": \"float\", \"index\": \"false\"}\n }\n },\n \"n_citation\": { \n \"type\": \"integer\",\n \"index\": \"false\"\n },\n \"page_start\": { \n \"type\": \"integer\",\n \"index\": \"false\"\n },\n \"page_end\": { \n \"type\": \"integer\",\n \"index\": \"false\"\n },\n \"doc_type\": {\n \"type\": \"text\",\n \"index\": \"false\"\n },\n \"lang\": { \n \"type\": \"text\",\n \"index\": \"false\"\n },\n \"publisher\": {\n \"type\": \"text\",\n \"index\": \"false\"\n },\n \"volume\": { \n \"type\": \"text\",\n \"index\": \"false\"\n },\n \"issue\": { \n \"type\": \"text\",\n \"index\": \"false\"\n },\n \"issn\": { \n \"type\": \"text\",\n \"index\": \"false\"\n },\n \"isbn\": { \n \"type\": \"text\",\n \"index\": \"false\"\n },\n \"doi\": { \n \"type\": \"text\",\n \"index\": \"false\"\n },\n \"pdf\": { \n \"type\": \"text\",\n \"index\": \"false\"\n },\n \"url\": { \n \"type\": \"text\",\n \"index\": \"false\"\n },\n \"abstract\": { \n \"type\": \"text\",\n \"analyzer\": \"snowball\"\n }\n }\n }\n }\n }\n\n try:\n print(\"try...\")\n self.es.indices.create(\n index=self.index_name,\n body=index_mappings, ignore=[400, 404])\n except TransportError as e:\n print(\"except\")\n # ignore already existing index\n if e.error == \"index_already_exists_exception\":\n pass\n else:\n raise\n\n # 插入数据\n def insert_data(self, inputfile):\n time_start = time.time()\n print(\"inserting data...\")\n path1=os.path.abspath(\".\") \n f = open(path1+inputfile, \"r\", encoding=\"UTF-8\")\n\n ACTIONS = []\n i = 1\n bulk_num = 2000\n for list_line in f.readlines():\n action = {\n \"_index\": self.index_name,\n \"_type\": self.index_type,\n \"_id\": i, # _id 也可以默认生成,不赋值\n \"_source\": json.loads(list_line)\n }\n i += 1\n ACTIONS.append(action)\n # 批量处理\n \n if len(ACTIONS) == bulk_num:\n print(\"index data\", int(i))\n success, _ = bulk(client=self.es, actions=ACTIONS, raise_on_error=False)\n del ACTIONS[0:len(ACTIONS)]\n print(\"complete del\")\n\n if len(ACTIONS) > 0:\n success, _ = bulk(client=self.es, actions=ACTIONS, raise_on_error=False)\n del ACTIONS[0:len(ACTIONS)]\n print(\"Performed %d actions\" % success)\n\n f.close()\n print(\"177\")\n\n\nif __name__ == \"__main__\":\n \n obj = ElasticObj(\"academic\", \"article\")\n obj.create_index()\n obj.insert_data(\"/pagerank_result.json\")\n print(\"here\")\n \ntime_end = time.time()\nprint(\"total time {} s\".format(time_end-start_time)) "
},
{
"alpha_fraction": 0.8409090638160706,
"alphanum_fraction": 0.8409090638160706,
"avg_line_length": 13.666666984558105,
"blob_id": "dc3c94ca305a6383fcc611f2e60b5e09faf0dc86",
"content_id": "f4ce0b209d130aad5401cf24824ca7f17a45b253",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Markdown",
"length_bytes": 44,
"license_type": "no_license",
"max_line_length": 29,
"num_lines": 3,
"path": "/README.md",
"repo_name": "xxxfzxxx/ForwardLab",
"src_encoding": "UTF-8",
"text": "# ForwardLab\n\nsummer research participation\n"
}
] | 5 |
YH-Cali/metatlas
|
https://github.com/YH-Cali/metatlas
|
3ba699f4a25332ae8775454a41dc280f180b0a58
|
aa1e29b5e81c90b345290d88b2e878213e9df19a
|
b5e3e90a77f9254b6c911a0b1d3221c835746335
|
refs/heads/master
| 2020-04-10T02:25:18.772200 | 2019-05-23T22:33:50 | 2019-05-23T22:33:50 | 160,743,903 | 0 | 0 |
BSD-3-Clause
| 2018-12-06T23:10:03 | 2018-12-06T23:10:06 | 2019-05-23T22:33:51 |
Python
|
[
{
"alpha_fraction": 0.6012024283409119,
"alphanum_fraction": 0.6192384958267212,
"avg_line_length": 28.352941513061523,
"blob_id": "9f2338e64e03a73fe6bfb43fc96fa4950c79348e",
"content_id": "ccafca5ea24c795f09b5ee499c811d1704fddb0c",
"detected_licenses": [
"BSD-3-Clause"
],
"is_generated": false,
"is_vendor": false,
"language": "Shell",
"length_bytes": 499,
"license_type": "permissive",
"max_line_length": 79,
"num_lines": 17,
"path": "/scripts/run_file_converter.sh",
"repo_name": "YH-Cali/metatlas",
"src_encoding": "UTF-8",
"text": "#!/bin/bash -l\n\nmkdir -p \"$HOME/tmp\"\nPIDFILE=\"$HOME/tmp/file_converter.pid\"\n\nif [ -e \"${PIDFILE}\" ] && (ps -u $(whoami) -opid= |\n grep -P \"^\\s*$(cat ${PIDFILE})$\" &> /dev/null); then\n echo \"Already running.\"\n exit 99\nfi\n\nLOGFILE=\"/global/project/projectdirs/metatlas/file_converter.log\"\nMET_PATH=/global/common/software/m2650/python-cori/bin/python\n$MET_PATH -m metatlas.file_converter \"$MET_PATH/raw_data/\" &> ${LOGFILE} &\n\necho $! > \"${PIDFILE}\"\nchmod 644 \"${PIDFILE}\"\n"
},
{
"alpha_fraction": 0.45150992274284363,
"alphanum_fraction": 0.46945643424987793,
"avg_line_length": 35.56151580810547,
"blob_id": "3f87f52d04207c02f52c17e6b3a958cc6abc137b",
"content_id": "156465360dd604ede211cd6f08ec3ecfa496a2fc",
"detected_licenses": [
"BSD-3-Clause"
],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 11590,
"license_type": "permissive",
"max_line_length": 164,
"num_lines": 317,
"path": "/metatlas/helpers/chromplotplus.py",
"repo_name": "YH-Cali/metatlas",
"src_encoding": "UTF-8",
"text": "from matplotlib import pyplot as plt\nfrom matplotlib import collections as mc\nfrom matplotlib.backends.backend_pdf import PdfPages\nimport numpy as np\nimport warnings\nfrom time import time\nfrom textwrap import wrap\nfrom sys import maxsize\n\n#######################\n#letter gen\n#######################\ndef letter_gen():\n label = 'A'\n yield label\n\n while True:\n label_noz = label.rstrip('Z')\n label_list = [ord(l) for l in label_noz]\n\n if len(label_noz) != 0:\n label_list[-1] += 1\n else:\n label_list.append(65)\n\n for i in range(len(label) - len(label_noz)):\n label_list.append(65)\n\n label = ''.join(chr(l) for l in label_list)\n yield label\n\n\n#######################\n#normalize_data\n#######################\ndef normalize_data(data, Names, x_offset, y_offset, sub_x, sub_y):\n\n X_min = maxsize\n X_max = 0\n Y_min = maxsize\n Y_max = 0\n\n for d in data:\n for r in d['data']['eic']['rt']:\n if np.amin(np.asarray(r)) < X_min:\n X_min = r\n if np.amax(np.asarray(r)) > X_max:\n X_max = r\n for i in d['data']['eic']['intensity']:\n if np.amin(np.asarray(i)) < Y_min:\n Y_min = i\n if np.amax(np.asarray(i)) > Y_max:\n Y_max = i\n\n try:\n with warnings.catch_warnings():\n warnings.simplefilter(\"ignore\")\n x_mag = int(np.log10(X_max - X_min))\n except (ValueError, OverflowError):\n x_mag = 0\n\n try:\n with warnings.catch_warnings():\n warnings.simplefilter(\"ignore\")\n y_mag = int(np.log10(Y_max - Y_min))\n except (ValueError, OverflowError):\n y_mag = 0\n\n\n x_range = (max(0, np.round(X_min - .5*(10 ** x_mag), int(-1*x_mag))), np.round(X_max + .5*(10 ** x_mag), int(-1*x_mag)))\n y_range = (max(0, np.round(Y_min - .5*(10 ** y_mag), int(-1*y_mag))), np.round(Y_max + .5*(10 ** y_mag), int(-1*y_mag)))\n\n\n def scale_x(x):\n return (x_offset - (x_offset - sub_x))*(x - x_range[0])/(x_range[1] - x_range[0])\n\n def scale_y(y):\n return (y_offset - (y_offset - sub_y))*(y - y_range[0])/(y_range[1] - y_range[0])\n\n Groups = [d['group'].name for d in data]\n X = [scale_x(np.asarray(d['data']['eic']['rt'])) for d in data]\n Y = [scale_y(np.asarray(d['data']['eic']['intensity'])) for d in data]\n RT_mins = [scale_x(d['identification'].rt_references[0].rt_min) for d in data]\n RT_maxs = [scale_x(d['identification'].rt_references[0].rt_max) for d in data]\n RT_peaks = [scale_x(d['identification'].rt_references[0].rt_peak) for d in data]\n\n return {'x_range': x_range,\n 'y_range': y_range,\n 'y_mag': y_mag,\n 'data': np.array(zip(Groups, Names, X, Y, RT_mins, RT_maxs, RT_peaks),\n dtype=[('group', 'object'),('name', 'object'),('x','object'),('y','object'),('rt_min','float'),('rt_max','float'),('rt_peak','float')])\n }\n\n\n#######################\n#chromplotplus\n#######################\ndef chromplotplus(kwargs):\n\n file_name = kwargs['file_name']\n\n ##Options\n warnings.simplefilter('ignore', FutureWarning)\n share_y = kwargs['share_y']\n group = kwargs['group']\n\n #Subplot size and seperations\n sub_x = 8\n sub_y = 6\n x_offset = 9\n y_offset = 7\n\n #Normalize data\n norm = normalize_data(kwargs['data'], kwargs['names'], y_offset, x_offset, sub_x, sub_y)\n\n x_range = norm['x_range']\n y_range = norm['y_range']\n y_mag = norm['y_mag']\n data = norm['data']\n\n #Groups\n groups = {} # stores \"group name: [index of first data of group, # of data belonging to group]\"\n if group == 'page' or group == 'index' or group == 'sort':\n data = sorted(data, key=lambda d: d['group'])\n for i, d in enumerate(data):\n if groups.get(d['group']) == None:\n groups[d['group']] = [i, 1]\n else:\n groups[d['group']][1] += 1\n\n\n #Subplot arrangement\n n_plots_list = []\n n_rows_list = []\n n_cols_list = []\n\n if group == 'page':\n for g in sorted(groups.keys()):\n n_plots_list.append(groups[g][1])\n n_rows_list.append(int(np.ceil(np.sqrt(13.0*(groups[g][1])/11))))\n n_cols_list.append(int(np.ceil((groups[g][1])/float(n_rows_list[-1]))))\n elif group == 'index':\n n_plots_list.append(len(data))\n n_rows_list.append(int(np.ceil(np.sqrt(13.0*(n_plots_list[0]+len(groups))/11))))\n n_cols_list.append(int(np.ceil((n_plots_list[0]+len(groups))/float(n_rows_list[0]))))\n else:\n n_plots_list.append(len(data))\n n_rows_list.append(int(np.ceil(np.sqrt(13.0*n_plots_list[0]/11))))\n n_cols_list.append(int(np.ceil(n_plots_list[0]/float(n_rows_list[0]))))\n\n #Hashmark variables\n hash_n = 5\n hash_m = 5\n hash_l = .02*min(sub_x, sub_y)\n\n #Axis values\n x_values = np.linspace(x_range[0], x_range[1], num=hash_n)\n y_values = np.linspace(y_range[0]/(10 ** int(y_mag)), y_range[1]/(10 ** y_mag), num=hash_m)\n\n def plot():\n #Plot creation\n fig = plt.figure()\n ax = plt.subplot(111)\n plt.setp(ax, 'frame_on', False)\n plt.ioff()\n ax.set_ylim([sub_y - y_offset, (n_cols)*y_offset + (y_offset - sub_y)])\n ax.set_xlim([sub_x - x_offset, (n_rows)*x_offset + (x_offset - sub_x)])\n ax.set_xticks([])\n ax.set_yticks([])\n ax.grid('off')\n ax.margins(1)\n\n #Group title\n if group == 'page':\n plt.title(\"\\n\".join(wrap(g,54)), size = 12., weight='bold')\n\n #Coordinate lists for lines to be drawn\n boxes = []\n hashes = []\n rt_edges = []\n rt_peaks = []\n\n #Letter generator for labeling groups\n if group == 'index':\n lg = letter_gen()\n\n c = 0 #counter for plots\n for j in np.arange(n_cols - 1, -1, -1):\n for i in np.arange(n_rows):\n\n #Repeating calculations\n x_delta = i*x_offset\n y_delta = j*y_offset\n\n #Break if everything is plotted\n if c >= n_plots:\n break\n\n #Label new groups\n if group == 'index' and c in [v[0] for v in groups.values() if v is not None]:\n ax.annotate(\"\\n\".join(wrap(next(lg),54)),\n (x_delta + .5*sub_x, y_delta + .5*sub_y - (y_offset - sub_y)),\n ha='center', va='center', size = 240./(n_cols+.25), weight='bold')\n\n for k,v in groups.items():\n if v is not None and v[0] == c:\n groups[k][0] = None\n continue\n\n #Retention Times\n rt_min = d[c]['rt_min'] + x_delta\n rt_max = d[c]['rt_max'] + x_delta\n rt_peak = d[c]['rt_peak'] + x_delta\n\n rt_edges.append([(rt_min, y_delta), (rt_min, sub_y + y_delta)])\n rt_edges.append([(rt_max, y_delta), (rt_max, sub_y + y_delta)])\n rt_peaks.append([(rt_peak, y_delta), (rt_peak, sub_y + y_delta)])\n\n #Data\n if len(d[c]['x']) > 1:\n x = d[c]['x'] + x_delta\n y = d[c]['y'] + y_delta\n ax.plot(x, y, 'k-',linewidth=2.0/np.sqrt(n_cols*n_rows),alpha=1.0)\n myWhere = np.logical_and(x>=rt_min, x<=rt_max )\n ax.fill_between(x, y_delta, y, myWhere, facecolor='c', edgecolor='c', linewidth=0, alpha=0.3)\n\n #Boxes\n boxes.append([(x + x_delta, y + y_delta) for x,y in\n [(0, 0), (sub_x, 0), (sub_x, sub_y), (0, sub_y), (0, 0)]])\n\n #Subplot Titles\n ax.annotate(\"\\n\".join(wrap(d[c]['name'],48)),\n (x_delta + .5*sub_x, y_delta + sub_y + .1*(y_offset - sub_y)),\n ha='center', size = 8./(n_cols+.25), weight='bold')\n\n #Hashmarks and associated labels\n for axis in ['bottom', 'left', 'right', 'top']:\n\n #Horizontal range\n for k in range(0, hash_n):\n if axis == 'bottom':\n start = (k*(1.0/hash_n)*sub_x + x_delta, y_delta)\n end = (k*(1.0/hash_n)*sub_x + x_delta, hash_l + y_delta)\n\n #X-axis labels\n ax.annotate(x_values[k], (start[0], start[1] - .15*(y_offset - sub_y)),\n ha='center', size = 8./(n_cols+.25))\n\n hashes.append([start, end])\n\n if axis == 'top':\n start = (k*(1.0/hash_n)*sub_x + x_delta, sub_y + y_delta)\n end = (k*(1.0/hash_n)*sub_x + x_delta, sub_y - hash_l + y_delta)\n\n #Y-axis magnitude labels\n if k == 0 and (share_y == False or i == 0):\n ax.annotate('1e{}'.format(y_mag), (start[0], start[1] + .1*(y_offset - sub_y)),\n ha='center', va='center', size = 8./(n_cols+.25))\n\n hashes.append([start, end])\n\n #Vertical range\n for l in range(0, hash_m):\n if axis == 'left':\n start = (x_delta, l*(1.0/hash_m)*sub_y + y_delta)\n end = ((hash_l + x_delta, l*(1.0/hash_m)*sub_y + y_delta))\n\n #Y-axis labels for leftmost subplots\n if share_y == False or i == 0:\n ax.annotate(y_values[l], (start[0] - .15*(x_offset - sub_x), start[1]),\n ha='right', va='center', size = 8./(n_cols+.25))\n\n hashes.append([start, end])\n\n if axis == 'right':\n start = (sub_x + x_delta, l*(1.0/hash_m)*sub_y + y_delta)\n end = (sub_x - hash_l + x_delta, l*(1.0/hash_m)*sub_y + y_delta)\n\n hashes.append([start, end])\n\n c += 1 #increment plot counter\n\n #Make line colelctions\n bc = mc.LineCollection(boxes, colors=(0,0,0,1), linewidths=1.0/np.sqrt(n_cols*n_rows))\n hc = mc.LineCollection(hashes, colors=(0,0,0,1), linewidths=1.0/np.sqrt(n_cols*n_rows))\n rc = mc.LineCollection(rt_edges, colors=(0,0,0,1), linewidths=2.0/np.sqrt(n_cols*n_rows))\n pc = mc.LineCollection(rt_peaks, colors=(1,0,0,1), linewidths=2.0/np.sqrt(n_cols*n_rows))\n\n #Draw lines\n ax.add_collection(bc)\n ax.add_collection(hc)\n ax.add_collection(rc)\n ax.add_collection(pc)\n\n plt.rcParams['pdf.fonttype'] = 42\n plt.rcParams['pdf.use14corefonts'] = True\n plt.rcParams['text.usetex'] = False\n pdf.savefig()\n plt.close()\n\n\n with PdfPages(file_name) as pdf:\n if group == 'page':\n for i, g in enumerate(sorted(groups.keys())):\n n_rows = n_rows_list[i]\n n_cols = n_cols_list[i]\n n_plots = n_plots_list[i]\n d = data[groups[g][0]:groups[g][0] + groups[g][1]]\n\n plot()\n else:\n n_rows = n_rows_list[0]\n n_cols = n_cols_list[0]\n n_plots = n_plots_list[0]\n d = data\n\n plot()\n"
}
] | 2 |
underseatravel/AlgorithmQIUZHAO
|
https://github.com/underseatravel/AlgorithmQIUZHAO
|
e2340ad8f3e723fd1e25fe678bdd547bb76483e0
|
b41e4ddae5e31074992d0b96bd029fcb6291d2ed
|
76f7cc243a743904c24dc328d057306720251e30
|
refs/heads/master
| 2022-11-29T05:25:00.338298 | 2020-08-22T10:23:17 | 2020-08-22T10:23:17 | 279,102,613 | 0 | 0 | null | 2020-07-12T16:22:04 | 2020-07-09T03:43:51 | 2020-07-09T03:43:49 | null |
[
{
"alpha_fraction": 0.4977777898311615,
"alphanum_fraction": 0.5688889026641846,
"avg_line_length": 24,
"blob_id": "45c5dbc6da6b922e8d3b72e76035c15b2e550c7f",
"content_id": "ae4b4174924d50bc2433d87fb463e861238e5e84",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 225,
"license_type": "no_license",
"max_line_length": 45,
"num_lines": 9,
"path": "/Week_06/151_reverse_words_in_a_string.py",
"repo_name": "underseatravel/AlgorithmQIUZHAO",
"src_encoding": "UTF-8",
"text": "#!/usr/bin/python3\n# -*- coding: utf-8 -*-\n# @Time : 2020/7/17 2:15\n# @Author : weiyu\n# @File : 151_reverse_words_in_a_string.py\n\nclass Solution:\n def reverseWords(self, s):\n return \" \".join(s.split()[::-1])\n"
},
{
"alpha_fraction": 0.49024391174316406,
"alphanum_fraction": 0.5268292427062988,
"avg_line_length": 21.77777862548828,
"blob_id": "df55170d0421ee28fa091c5351b8a567e5a49022",
"content_id": "81c734f5cfd030bbffe71ec7b2d6753c978f5bc5",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 410,
"license_type": "no_license",
"max_line_length": 49,
"num_lines": 18,
"path": "/Week_02/589_n_ary_tree_preorder_traversal.py",
"repo_name": "underseatravel/AlgorithmQIUZHAO",
"src_encoding": "UTF-8",
"text": "#!/usr/bin/python3\n# -*- coding: utf-8 -*-\n# @Time : 2020/7/22 0:43\n# @Author : weiyu\n# @File : 589_n_ary_tree_preorder_traversal.py\n\n\nclass Solution:\n def preorder(self, root):\n res = []\n self.dfs(root, res)\n return res\n\n def dfs(self, root, res):\n if root:\n res.append(root.val)\n for child in root.children:\n self.dfs(child, res)\n"
},
{
"alpha_fraction": 0.3910675346851349,
"alphanum_fraction": 0.43355119228363037,
"avg_line_length": 27.71875,
"blob_id": "4488abd9137063db9355e72a6bbd40f24c175e87",
"content_id": "d30617d5400c843974cfcd59df6c4ee322d7b0df",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 918,
"license_type": "no_license",
"max_line_length": 59,
"num_lines": 32,
"path": "/Week_04/64_minumun_path_sum.py",
"repo_name": "underseatravel/AlgorithmQIUZHAO",
"src_encoding": "UTF-8",
"text": "#!/usr/bin/python3\n# -*- coding: utf-8 -*-\n# @Time : 2020/8/8 18:06\n# @Author : weiyu\n# @File : 64_minumun_path_sum.py\n\n# DP Time O(mn) Space O(mn)\nclass Solution:\n def minPathSum(self, grid):\n dp = grid\n m, n = len(grid), len(grid[0])\n for i in range(1, m):\n dp[i][0] += dp[i - 1][0]\n for j in range(1, n):\n dp[0][j] += dp[0][j - 1]\n for i in range(1, m):\n for j in range(1, n):\n dp[i][j] += min(dp[i - 1][j], dp[i][j - 1])\n return dp[-1][-1]\n\n# DP Time O(mn) Space O(n)\nclass Solution:\n def minPathSum(self, grid):\n dp = grid[0]\n m, n = len(grid), len(grid[0])\n for j in range(1, n):\n dp[j] += dp[j - 1]\n for i in range(1, m):\n dp[0] += grid[i][0]\n for j in range(1, n):\n dp[j] = grid[i][j] + min(dp[j], dp[j - 1])\n return dp[-1]"
},
{
"alpha_fraction": 0.49790793657302856,
"alphanum_fraction": 0.573221743106842,
"avg_line_length": 22.899999618530273,
"blob_id": "29789cbecc3d5f4ed6f2fbd576a8d4182cf89fb3",
"content_id": "01a515e8aeedf4615e9fa9d17a9883ea8a953bd6",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 239,
"license_type": "no_license",
"max_line_length": 51,
"num_lines": 10,
"path": "/Week_06/557_reverse_words_in_a_string3.py",
"repo_name": "underseatravel/AlgorithmQIUZHAO",
"src_encoding": "UTF-8",
"text": "#!/usr/bin/python3\n# -*- coding: utf-8 -*-\n# @Time : 2020/8/22 10:54\n# @Author : weiyu\n# @File : 557_reverse_words_in_a_string3.py\n\n\nclass Solution:\n def reverseWords(self, s):\n return \" \".join(c[::-1] for c in s.split())\n"
},
{
"alpha_fraction": 0.44680851697921753,
"alphanum_fraction": 0.5035461187362671,
"avg_line_length": 27.266666412353516,
"blob_id": "3cbb6fb2ef358304c99bb79b540b7f93f60e1e3a",
"content_id": "cfff8c4bfcb71a1b9284b8e4d6667cd24e3a4998",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 423,
"license_type": "no_license",
"max_line_length": 57,
"num_lines": 15,
"path": "/Week_06/205_isomorphic_strings.py",
"repo_name": "underseatravel/AlgorithmQIUZHAO",
"src_encoding": "UTF-8",
"text": "#!/usr/bin/python3\n# -*- coding: utf-8 -*-\n# @Time : 2020/8/22 13:48\n# @Author : weiyu\n# @File : 205_isomorphic_strings.py\n\n\nclass Solution:\n def isIsomorphic(self, s, t):\n d1, d2 = {}, {}\n for i, val in enumerate(s):\n d1[val] = d1.get(val, []) + [i]\n for i, val in enumerate(t):\n d2[val] = d2.get(val, []) + [i]\n return sorted(d1.values()) == sorted(d2.values())"
},
{
"alpha_fraction": 0.4787701368331909,
"alphanum_fraction": 0.5065885782241821,
"avg_line_length": 22.586206436157227,
"blob_id": "bfac336733da5de7026a36672baa894208b2ebea",
"content_id": "ab8f5711bbbc663831aed634666f0a2a03b46849",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 683,
"license_type": "no_license",
"max_line_length": 46,
"num_lines": 29,
"path": "/Week_06/146_lru_cache.py",
"repo_name": "underseatravel/AlgorithmQIUZHAO",
"src_encoding": "UTF-8",
"text": "#!/usr/bin/python3\n# -*- coding: utf-8 -*-\n# @Time : 2020/8/17 18:26\n# @Author : weiyu\n# @File : 146_lru_cache.py\nimport collections\n\nclass LRUCache:\n def __init__(self, capacity):\n self.dic = collections.OrderedDict()\n self.remain = capacity\n\n def get(self, key):\n if key not in self.dic:\n return -1\n v = self.dic.pop(key)\n self.dic[key] = v\n return v\n\n\n def put(self, key, value):\n if key in self.dic:\n self.dic.pop(key)\n else:\n if self.remain > 0:\n self.remain -= 1\n else:\n self.dic.popitem(last = False)\n self.dic[key] = value"
},
{
"alpha_fraction": 0.4977777898311615,
"alphanum_fraction": 0.521481454372406,
"avg_line_length": 27.08333396911621,
"blob_id": "861bebdf46dc2b4ce75b195efb2c01491cc8f36a",
"content_id": "0dd390b9aeaef42e6f88fdef8c18384f6f4c0031",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 679,
"license_type": "no_license",
"max_line_length": 76,
"num_lines": 24,
"path": "/Week_02/46_permutations.py",
"repo_name": "underseatravel/AlgorithmQIUZHAO",
"src_encoding": "UTF-8",
"text": "#!/usr/bin/python3\n# -*- coding: utf-8 -*-\n# @Time : 2020/7/25 23:00\n# @Author : weiyu\n# @File : 46_permutations.py\n\n# 递归\nclass Solution:\n def permute(self, nums):\n res = []\n self.recursion(nums, [], res)\n return res\n\n def recursion(self, nums, path, res):\n if len(nums) == len(path):\n res.append(path)\n for i in list(set(nums).difference(set(path))):\n self.recursion(nums, path + [i], res)\n\n # def recursion(self, nums, path, res):\n # if not nums:\n # res.append(path)\n # for i in range(len(nums)):\n # self.recursion(nums[:i] + nums[i + 1:], path + [nums[i]], res)\n\n"
},
{
"alpha_fraction": 0.3932432532310486,
"alphanum_fraction": 0.4418918788433075,
"avg_line_length": 25.39285659790039,
"blob_id": "d7b130d6cc3af10604dd7808efbd1e6b92f72b44",
"content_id": "f66c7325eb8482d082bf08b6b67772459f13d1df",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 740,
"license_type": "no_license",
"max_line_length": 69,
"num_lines": 28,
"path": "/Week_01/42_trapping_rain_water.py",
"repo_name": "underseatravel/AlgorithmQIUZHAO",
"src_encoding": "UTF-8",
"text": "#!/usr/bin/python3\n# -*- coding: utf-8 -*-\n# @Time : 2020/7/17 23:04\n# @Author : weiyu\n# @File : 42_trapping_rain_water.py\n\n\nclass Solution:\n def trap(self, height):\n stack = []\n res, idx = 0, 0\n length = len(height)\n if length < 3: return 0\n while idx < length:\n while stack and height[idx] > height[stack[-1]]:\n top = stack.pop()\n if stack == []:\n break\n h = min(height[idx], height[stack[-1]]) - height[top]\n w = idx - stack[-1] - 1\n res += h * w\n stack.append(idx)\n idx += 1\n return res\n\n\nt = Solution()\nprint(t.trap([0, 1, 0, 2, 1, 0, 1, 3, 2, 1, 2, 1]))\n\n"
},
{
"alpha_fraction": 0.36533957719802856,
"alphanum_fraction": 0.42388758063316345,
"avg_line_length": 24.176469802856445,
"blob_id": "2d4b9b8a9343d71bdcfc152a1a68d3d57ae7036f",
"content_id": "61a4bf3b7786feae46878c625401f54ae2ab1112",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 427,
"license_type": "no_license",
"max_line_length": 59,
"num_lines": 17,
"path": "/Week_06/680_valid_palindrome2.py",
"repo_name": "underseatravel/AlgorithmQIUZHAO",
"src_encoding": "UTF-8",
"text": "#!/usr/bin/python3\n# -*- coding: utf-8 -*-\n# @Time : 2020/8/22 13:30\n# @Author : weiyu\n# @File : 680_valid_palindrome2.py\n\n\nclass Solution:\n def validPalindrome(self, s):\n l, r = 0, len(s) - 1\n while l < r:\n if s[l] != s[r]:\n one, two = s[l:r], s[l + 1:r + 1]\n return one == one[::-1] or two == two[::-1]\n l += 1\n r -= 1\n return True"
},
{
"alpha_fraction": 0.48326361179351807,
"alphanum_fraction": 0.5230125784873962,
"avg_line_length": 25.61111068725586,
"blob_id": "6e297b75157b5535226ccd490b00fc22867e559e",
"content_id": "283dc2498de089fd63fb5968cd2299bb25a178af",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 478,
"license_type": "no_license",
"max_line_length": 57,
"num_lines": 18,
"path": "/Week_02/347_top_k_frequent_element.py",
"repo_name": "underseatravel/AlgorithmQIUZHAO",
"src_encoding": "UTF-8",
"text": "#!/usr/bin/python3\n# -*- coding: utf-8 -*-\n# @Time : 2020/7/23 18:46\n# @Author : weiyu\n# @File : 347_top_k_frequent_element.py\nimport heapq\n\nclass Solution:\n def topKFrequent(self, nums, k):\n res = []\n dict = {}\n for i in nums:\n dict[i] = dict.get(i, 0) + 1\n heap = [(-val, key) for key, val in dict.items()]\n heapq.heapify(heap)\n for i in range(k):\n res.append(heapq.heappop(heap)[1])\n return res"
},
{
"alpha_fraction": 0.5011709332466125,
"alphanum_fraction": 0.533957839012146,
"avg_line_length": 24.176469802856445,
"blob_id": "63e0bb277ae531ecc86c1f776f9a33a9c7db0000",
"content_id": "876f5ccde681bc5d0ceecd2c1e08dd84a1e4a15b",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 427,
"license_type": "no_license",
"max_line_length": 66,
"num_lines": 17,
"path": "/Week_01/49_group_anagrams.py",
"repo_name": "underseatravel/AlgorithmQIUZHAO",
"src_encoding": "UTF-8",
"text": "#!/usr/bin/python3\n# -*- coding: utf-8 -*-\n# @Time : 2020/7/18 0:33\n# @Author : weiyu\n# @File : 49_group_anagrams.py\n\nclass Solution:\n def groupAnagrams(self, strs):\n dict = {}\n for item in strs:\n key = tuple(sorted(item))\n dict[key] = dict.get(key, []) + [item]\n return list(dict.values())\n\n\nt = Solution()\nprint(t.groupAnagrams([\"eat\", \"tea\", \"tan\", \"ate\", \"nat\", \"bat\"]))"
},
{
"alpha_fraction": 0.5083114504814148,
"alphanum_fraction": 0.5284339189529419,
"avg_line_length": 19.03508758544922,
"blob_id": "afb7d6c0c3df9ccc68baaaa6e214c6ba9e1a1d27",
"content_id": "9eb4a740f661213c3d444e5cda3544b658ca38c8",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 1143,
"license_type": "no_license",
"max_line_length": 47,
"num_lines": 57,
"path": "/Week_01/641_design_circular_deque.py",
"repo_name": "underseatravel/AlgorithmQIUZHAO",
"src_encoding": "UTF-8",
"text": "#!/usr/bin/python3\n# -*- coding: utf-8 -*-\n# @Time : 2020/7/17 21:31\n# @Author : weiyu\n# @File : 641_design_circular_deque.py\n\n\nclass MyCircularDeque:\n def __init__(self, k):\n self.max_size = k\n self.queue = []\n\n def insertFront(self, value):\n if len(self.queue) < self.max_size:\n self.queue.insert(0, value)\n return True\n return False\n\n\n def insertLast(self, value):\n if len (self.queue) < self.max_size:\n self.queue.append(value)\n return True\n return False\n\n\n def deleteFront(self):\n if self.queue:\n del self.queue[0]\n return True\n return False\n\n def deleteLast(self):\n if self.queue:\n del self.queue[-1]\n return True\n return False\n\n\n def getFront(self):\n if self.queue:\n return self.queue[0]\n return -1\n\n\n def getRear(self):\n if self.queue:\n return self.queue[-1]\n return -1\n\n\n def isEmpty(self):\n return not self.queue\n\n\n def isFull(self):\n return len(self.queue) == self.max_size\n\n"
},
{
"alpha_fraction": 0.5081555843353271,
"alphanum_fraction": 0.539523184299469,
"avg_line_length": 23.18181800842285,
"blob_id": "73efdd2dfb4c544757fab4b61372e14d39edb3bd",
"content_id": "d1691a3c59ea92deb68fe81dc4c72feafdb3cc56",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 807,
"license_type": "no_license",
"max_line_length": 55,
"num_lines": 33,
"path": "/Week_03/169_majority_element.py",
"repo_name": "underseatravel/AlgorithmQIUZHAO",
"src_encoding": "UTF-8",
"text": "#!/usr/bin/python3\n# -*- coding: utf-8 -*-\n# @Time : 2020/7/27 17:50\n# @Author : weiyu\n# @File : 169_majority_element.py\n\n# sort法\nclass Solution:\n def majorityElement(self, nums):\n nums.sort()\n return nums[len(nums)//2]\n\n# 哈希\nclass Solution:\n def majorityElement(self, nums):\n dict = {}\n for num in nums:\n dict[num] = dict.get(num, 0) + 1\n for key in dict.keys():\n if dict[key] > len(nums)//2:\n return key\n\n\n\n# 分治\nclass Solution:\n def majorityElement(self, nums):\n if len(nums) == 1: return nums[0]\n a = self.majorityElement(nums[:len(nums)//2])\n b = self.majorityElement(nums[len(nums)//2:])\n if a == b:\n return a\n return a if nums.count(a) > len(nums)//2 else b"
},
{
"alpha_fraction": 0.40859031677246094,
"alphanum_fraction": 0.4427312910556793,
"avg_line_length": 25.705883026123047,
"blob_id": "c72bde4d0b3fd3c354168eb02780b0bf6b2708b4",
"content_id": "ebca48477cf7d8f21b0b873afa7390711cff7cdc",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 918,
"license_type": "no_license",
"max_line_length": 60,
"num_lines": 34,
"path": "/Week_04/647_palindromic_substrings.py",
"repo_name": "underseatravel/AlgorithmQIUZHAO",
"src_encoding": "UTF-8",
"text": "#!/usr/bin/python3\n# -*- coding: utf-8 -*-\n# @Time : 2020/8/9 1:05\n# @Author : weiyu\n# @File : 647_palindromic_substrings.py\n\n# Dp Time O(n^2) Space O(n^2)\nclass Solution:\n def countSubstrings(self, s):\n n = len(s)\n res = 0\n dp = [[0 for _ in range(n)] for _ in range(n)]\n for i in range(n - 1, -1, -1):\n for j in range(i, n):\n if s[i] == s[j]:\n dp[i][j] = j - i < 2 or dp[i + 1][j - 1]\n res += dp[i][j]\n return res\n\n\n# 中心扩散法 Time O(n^2)\nclass Solution:\n def countSubstrings(self, s):\n self.res = 0\n for i in range(len(s)):\n self.palindrome(s, i, i)\n self.palindrome(s, i, i + 1)\n return self.res\n\n def palindrome(self, s, l, r):\n while l >= 0 and r < len(s) and s[l] == s[r]:\n l -= 1\n r += 1\n self.res += 1\n"
},
{
"alpha_fraction": 0.7282230257987976,
"alphanum_fraction": 0.7334494590759277,
"avg_line_length": 21.920000076293945,
"blob_id": "e3a1b45dd201520e8f3f80d1f0140121f88a18e7",
"content_id": "1d1a6318d5b0c388be0430ca1c515af99ad3b78e",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Markdown",
"length_bytes": 1392,
"license_type": "no_license",
"max_line_length": 123,
"num_lines": 25,
"path": "/Week_06/README.md",
"repo_name": "underseatravel/AlgorithmQIUZHAO",
"src_encoding": "UTF-8",
"text": "学习笔记\n## 学习总结\n* 对于一些难题感到分析时间复杂度困难,还是要多学习。专注,冷静坚持\n* 对于一些新方法适当选择放弃,抓紧时间,老方法能做出题目也行\n* 面试四步骤:确认题意,阐述所有解并分析时空间复杂度,写代码,测试。勿忘\n* 多总结题型,找相似点,熟能生巧\n* 自上而下的编程\n* 五毒神掌 \n* 熟练度,防BUG!\n\n### 布隆过滤器与lru chche\n* 特点: 布隆过滤器:二进制位随机映射,作为数据库查询前的预查询处理,若全为1则可能存在,若至少有一个0,则一定不存在。lru chche least recently used 两个要素:大小,替换策略 本质位哈希表和双向链表的结合\n* 常见题型:理解概念,并会写lru chche\n\n### 高级搜索\n* 分类:比较类排序和非比较类排序\n* 比较类排序:又分为初级排序O(n^2):选择排序,插入排序,冒泡排序; 和高级排序O(nlogn):快排, 归并排序,堆排序\n* 剩下的特殊排序(非比较类排序)O(n):计数排序,桶排序,基数排序\n\n\n### 动态规划\n* 三步骤:找出子问题(分治),定义状态数组,写出dp方程\n\n### 字符串\n* 各类问题:基础问题,字符串操作问题(随机应变);异位词(多用哈希);回文串(中心扩散或者dp); 其他DP问题\n\n"
},
{
"alpha_fraction": 0.448574960231781,
"alphanum_fraction": 0.4603469669818878,
"avg_line_length": 28.851852416992188,
"blob_id": "0a6f02c87dd6faad805d1c5eede9ba0bab64a601",
"content_id": "ca621b2b45347c1a1ba8550fd8c154b5e56a7782",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 1626,
"license_type": "no_license",
"max_line_length": 83,
"num_lines": 54,
"path": "/Week_05/51_n_queens.py",
"repo_name": "underseatravel/AlgorithmQIUZHAO",
"src_encoding": "UTF-8",
"text": "#!/usr/bin/python3\n# -*- coding: utf-8 -*-\n# @Time : 2020/7/28 0:55\n# @Author : weiyu\n# @File : 51_n_queens.py\n\n# 递归\n# class Solution:\n# def solveNQueens(self, n):\n# self.res = []\n# self.cols = set(); self.pie = set(); self.na = set()\n# self.dfs(n, 0, [])\n# return self._generate_result(n)\n#\n# def dfs(self, n, row, status):\n# if row == n:\n# self.res.append(status)\n# return\n#\n# for col in range(n):\n# if col in self.cols or row + col in self.pie or row - col in self.na:\n# continue\n# self.cols.add(col)\n# self.pie.add(row + col)\n# self.na.add(row - col)\n# self.dfs(n, row + 1, status + [col])\n#\n# self.cols.remove(col)\n# self.pie.remove(row + col)\n# self.na.remove(row - col)\n#\n# def _generate_result(self, n):\n# block = []\n# for ch in self.res:\n# for i in ch:\n# block.append(\".\"*i + \"Q\" + \".\"*(n - i -1))\n# return [block[i:i+n] for i in range(0, len(block), n)]\n\n\n# 递归精简\nclass Solution:\n def solveNQueens(self, n):\n self.res = []\n self.dfs([],[],[], n)\n return [[\".\"*i + \"Q\" + \".\"*(n-i-1) for i in sol] for sol in self.res]\n\n def dfs(self, cols, pie, na, n):\n row = len(cols)\n if row == n:\n self.res.append(cols)\n\n for col in range(n):\n if col not in cols and row + col not in pie and row - col not in na:\n self.dfs(cols + [col], pie + [row + col], na + [row - col], n)\n\n\n"
},
{
"alpha_fraction": 0.44880175590515137,
"alphanum_fraction": 0.514161229133606,
"avg_line_length": 23.210525512695312,
"blob_id": "98b7c0dd3b7df73727cc18f5c5454f19086b3930",
"content_id": "631ceba26e36e5a15e4ffc60dbd1b377a05ea470",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 459,
"license_type": "no_license",
"max_line_length": 52,
"num_lines": 19,
"path": "/Week_01/26_remove_duplicates_in_sorted_array.py",
"repo_name": "underseatravel/AlgorithmQIUZHAO",
"src_encoding": "UTF-8",
"text": "#!/usr/bin/python3\n# -*- coding: utf-8 -*-\n# @Time : 2020/7/18 15:29\n# @Author : weiyu\n# @File : 26_remove_duplicates_in_sorted_array.py\n\n\nclass Solution:\n def removeDuplicates(self, nums):\n if not nums: return 0\n i = 0\n for j in range(1, len(nums)):\n if nums[i] != nums[j]:\n i += 1\n nums[i] = nums[j]\n return i + 1\n\nt = Solution()\nprint(t.removeDuplicates([0,0,1,1,1,2,2,3,3,4]))"
},
{
"alpha_fraction": 0.43253588676452637,
"alphanum_fraction": 0.46124401688575745,
"avg_line_length": 33.83333206176758,
"blob_id": "de07091fb800de4431c53b951f180a4cfd5257a7",
"content_id": "92357181d3eefeae12814fc5853db7c9f4e5b4ab",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 1045,
"license_type": "no_license",
"max_line_length": 101,
"num_lines": 30,
"path": "/Week_05/212_word_search2.py",
"repo_name": "underseatravel/AlgorithmQIUZHAO",
"src_encoding": "UTF-8",
"text": "#!/usr/bin/python3\n# -*- coding: utf-8 -*-\n# @Time : 2020/8/10 18:08\n# @Author : weiyu\n# @File : 212_word_search2.py\n\n\nclass Solution:\n def findWords(self, board, words):\n trie = {}\n for word in words:\n node = trie\n for char in word:\n node = node.setdefault(char, {})\n node[\"#\"] = True\n\n self.res, self.m, self.n = set(), len(board), len(board[0])\n for i in range(self.m):\n for j in range(self.n):\n if board[i][j] in trie:\n self.dfs(i, j, trie[board[i][j]], board[i][j], {(i, j)}, board)\n return list(self.res)\n\n def dfs(self, i, j, node, pre, visited, board):\n if \"#\" in node:\n self.res.add(pre)\n for d in ((-1, 0), (1, 0), (0, -1), (0, 1)):\n x, y = i + d[0], j + d[1]\n if 0 <= x < self.m and 0 <= y < self.n and board[x][y] in node and (x, y) not in visited:\n self.dfs(x, y, node[board[x][y]], pre + board[x][y], visited | {(x, y)}, board)\n"
},
{
"alpha_fraction": 0.46983546018600464,
"alphanum_fraction": 0.5045703649520874,
"avg_line_length": 25.095237731933594,
"blob_id": "5be5027a9486259d3c134a80b4a9290dd00dd7eb",
"content_id": "7a6759e9c690901b2ffc7fe00999d570aad5d7d8",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 547,
"license_type": "no_license",
"max_line_length": 74,
"num_lines": 21,
"path": "/Week_02/47_permutations2.py",
"repo_name": "underseatravel/AlgorithmQIUZHAO",
"src_encoding": "UTF-8",
"text": "#!/usr/bin/python3\n# -*- coding: utf-8 -*-\n# @Time : 2020/7/25 23:51\n# @Author : weiyu\n# @File : 47_permutations2.py\n\n\nclass Solution:\n def permuteUnique(self, nums):\n res = []\n nums.sort()\n self.recursion(nums, [], res)\n return res\n\n def recursion(self, nums, path, res):\n if not nums:\n res.append(path)\n for i in range(len(nums)):\n if i > 0 and nums[i] == nums[i - 1]:\n continue\n self.recursion(nums[:i] + nums[i + 1:], path + [nums[i]], res)"
},
{
"alpha_fraction": 0.40362319350242615,
"alphanum_fraction": 0.428260862827301,
"avg_line_length": 27.183673858642578,
"blob_id": "13413df1305afae7e61d30979f6d5baefd9b84e8",
"content_id": "70c7b2d80ba570ba3c7f99efaae7dc5aecf5502e",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 1380,
"license_type": "no_license",
"max_line_length": 60,
"num_lines": 49,
"path": "/Week_06/493_reverse_pairs.py",
"repo_name": "underseatravel/AlgorithmQIUZHAO",
"src_encoding": "UTF-8",
"text": "#!/usr/bin/python3\n# -*- coding: utf-8 -*-\n# @Time : 2020/8/19 0:36\n# @Author : weiyu\n# @File : 493_reverse_pairs.py\n\n\nclass Solution:\n def reversePairs(self, nums):\n if not nums: return 0\n self.cnt = 0\n left, right = 0, len(nums) - 1\n self.mergesort(nums, left, right)\n return self.cnt\n\n def mergesort(self, nums, left, right):\n if right <= left: return\n mid = (left + right) // 2\n self.mergesort(nums, left, mid)\n self.mergesort(nums, mid + 1, right)\n self.merge(nums, left, mid, right)\n\n def merge(self, nums, left, mid, right):\n tmp = []\n i = left\n j = mid + 1\n while i <= mid and j <= right:\n if nums[i] > 2 * nums[j]:\n self.cnt += mid - i + 1\n j += 1\n else:\n i += 1\n # i = left\n # j = mid + 1\n # while i <= mid and j <= right:\n # if nums[i] <= nums[j]:\n # tmp.append(nums[i])\n # i += 1\n # else:\n # tmp.append(nums[j])\n # j += 1\n # while i <= mid:\n # tmp.append(nums[i])\n # i += 1\n # while j <= right:\n # tmp.append(nums[j])\n # j += 1\n # nums[left: right + 1] = tmp\n nums[left:right + 1] = sorted(nums[left: right + 1])"
},
{
"alpha_fraction": 0.4464609920978546,
"alphanum_fraction": 0.4863883852958679,
"avg_line_length": 25.238094329833984,
"blob_id": "c05707621f051459cda3e6428e1690f67d165877",
"content_id": "2d5a4fd62a4cea681d12817f5ec4202450e22a5a",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 561,
"license_type": "no_license",
"max_line_length": 66,
"num_lines": 21,
"path": "/Week_02/77_combinations.py",
"repo_name": "underseatravel/AlgorithmQIUZHAO",
"src_encoding": "UTF-8",
"text": "#!/usr/bin/python3\n# -*- coding: utf-8 -*-\n# @Time : 2020/7/25 22:24\n# @Author : weiyu\n# @File : 77_combinations.py\n\n# 递归 加 剪枝\nclass Solution:\n def combine(self, n, k):\n res = []\n nums = range(1, n + 1)\n self.recursion(nums, k, 0, [], res)\n return res\n\n def recursion(self, nums, k, index, s, res):\n if k == 0:\n res.append(s)\n if k > len(nums) - index + 1:\n return\n for i in range(index, len(nums)):\n self.recursion(nums, k - 1, i + 1, s + [nums[i]], res)\n"
},
{
"alpha_fraction": 0.47311827540397644,
"alphanum_fraction": 0.5071684718132019,
"avg_line_length": 25.619047164916992,
"blob_id": "809a19b60d5d3511420abc2f981834da54990d91",
"content_id": "89b3528097b9094f0de50992d19da1dd9439f63e",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 562,
"license_type": "no_license",
"max_line_length": 57,
"num_lines": 21,
"path": "/Week_05/22_generate_parentheses.py",
"repo_name": "underseatravel/AlgorithmQIUZHAO",
"src_encoding": "UTF-8",
"text": "#!/usr/bin/python3\n# -*- coding: utf-8 -*-\n# @Time : 2020/7/25 1:09\n# @Author : weiyu\n# @File : 22_generate_parentheses.py\n\n# DFS 递归\nclass Solution:\n def generateParenthesis(self, n):\n res = []\n self.recursion(n, n, \"\", res)\n return res\n\n def recursion(self, left, right, s, res):\n if left == 0 and right == 0:\n res.append(s)\n return\n if left > 0:\n self.recursion(left - 1, right, s + \"(\", res)\n if right > left:\n self.recursion(left, right - 1, s + \")\", res)"
},
{
"alpha_fraction": 0.448738157749176,
"alphanum_fraction": 0.4700315594673157,
"avg_line_length": 25.41666603088379,
"blob_id": "b04054b0f53c891ecc8be7498771d4f1df21e656",
"content_id": "2c8f37b68da14aea02c91974519b97c30b1d0421",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 1268,
"license_type": "no_license",
"max_line_length": 60,
"num_lines": 48,
"path": "/Week_05/547_friend_circles.py",
"repo_name": "underseatravel/AlgorithmQIUZHAO",
"src_encoding": "UTF-8",
"text": "#!/usr/bin/python3\n# -*- coding: utf-8 -*-\n# @Time : 2020/8/10 21:45\n# @Author : weiyu\n# @File : 547_friend_circles.py\n\n# DFS time O(n^2) Space O(n)\nclass Solution:\n def findCircleNum(self, M):\n if not M: return 0\n n = len(M)\n count = 0\n self.visited = set()\n for i in range(n):\n if i not in self.visited:\n self.visited.add(i)\n self.dfs(i, M)\n count += 1\n return count\n\n def dfs(self, node, M):\n for nei, adj in enumerate(M[node]):\n if adj and nei not in self.visited:\n self.visited.add(nei)\n self.dfs(nei, M)\n\n# unionfind time O(n^3) Space O(n)\nclass Solution:\n def findCircleNum(self, M):\n if not M: return 0\n n = len(M)\n p = [i for i in range(n)]\n for i in range(n):\n for j in range(n):\n if M[i][j] == 1:\n self.union(p, i, j)\n return len(set(self.parent(p, i) for i in range(n)))\n\n def union(self, p, i, j):\n p1 = self.parent(p, i)\n p2 = self.parent(p, j)\n p[p2] = p1\n\n def parent(self, p, i):\n root = p[i]\n while p[root] != root:\n root = p[root]\n return root\n"
},
{
"alpha_fraction": 0.3996763825416565,
"alphanum_fraction": 0.4336569607257843,
"avg_line_length": 27.136363983154297,
"blob_id": "f5a71c43a2d6f4bade2a899e7e9ffdf170a4406a",
"content_id": "ad692fc7723d3d9dde7f2f9c4bbfebf46f2ab1a7",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 626,
"license_type": "no_license",
"max_line_length": 54,
"num_lines": 22,
"path": "/Week_04/76_minimum_window_substring.py",
"repo_name": "underseatravel/AlgorithmQIUZHAO",
"src_encoding": "UTF-8",
"text": "#!/usr/bin/python3\n# -*- coding: utf-8 -*-\n# @Time : 2020/8/9 23:09\n# @Author : weiyu\n# @File : 76_minimum_window_substring.py\nimport collections\n\n# 滑窗思想\nclass Solution:\n def minWindow(self, s, t):\n need, missing = collections.Counter(t), len(t)\n i = I = J = 0\n for j, c in enumerate(s, 1):\n missing -= need[c] > 0\n need[c] -= 1\n if not missing:\n while i < j and need[s[i]] < 0:\n need[s[i]] += 1\n i += 1\n if not J or j - i <= J - I:\n I, J = i, j\n return s[I:J]"
},
{
"alpha_fraction": 0.37294334173202515,
"alphanum_fraction": 0.4149908721446991,
"avg_line_length": 25.658536911010742,
"blob_id": "99ab38236ff9b2cd4d64122fdea945a13421ee45",
"content_id": "0ce84489473829acce37e02a8200b7ef036f3406",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 1094,
"license_type": "no_license",
"max_line_length": 91,
"num_lines": 41,
"path": "/Week_03/529_update_minesweeper.py",
"repo_name": "underseatravel/AlgorithmQIUZHAO",
"src_encoding": "UTF-8",
"text": "#!/usr/bin/python3\n# -*- coding: utf-8 -*-\n# @Time : 2020/7/29 18:47\n# @Author : weiyu\n# @File : 529_update_minesweeper.py\n\n\nclass Solution:\n def updateBoard(self, board, click):\n if not board: return []\n i, j = click[0], click[1]\n if board[i][j] == \"M\":\n board[i][j] = \"X\"\n return board\n\n self.dfs(board, i, j)\n return board\n\n def dfs(self, board, i, j):\n if board[i][j] != \"E\":\n return\n\n m, n = len(board), len(board[0])\n directions = [(-1, -1), (-1, 0), (-1, 1), (0, -1), (0, 1), (1, -1), (1, 0), (1, 1)]\n\n mine_count = 0\n for d in directions:\n ni, nj = i + d[0], j + d[1]\n if 0 <= ni < m and 0 <= nj < n and board[ni][nj] == \"M\":\n mine_count += 1\n\n if mine_count == 0:\n board[i][j] = \"B\"\n else:\n board[i][j] = str(mine_count)\n return\n\n for d in directions:\n ni, nj = i + d[0], j + d[1]\n if 0 <= ni < m and 0 <= nj < n :\n self.dfs(board, ni, nj)\n\n"
},
{
"alpha_fraction": 0.4392712414264679,
"alphanum_fraction": 0.48178136348724365,
"avg_line_length": 22.571428298950195,
"blob_id": "f384241c9439b056cb941b0449caa1a2bbcc971a",
"content_id": "5825a913bff9552f1a54f6c68e4d7b987189a75d",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 494,
"license_type": "no_license",
"max_line_length": 45,
"num_lines": 21,
"path": "/Week_06/541_reverse_string2.py",
"repo_name": "underseatravel/AlgorithmQIUZHAO",
"src_encoding": "UTF-8",
"text": "#!/usr/bin/python3\n# -*- coding: utf-8 -*-\n# @Time : 2020/8/22 10:34\n# @Author : weiyu\n# @File : 541_reverse_string2.py\n\n\nclass Solution:\n def reverseStr(self, s, k):\n s = list(s)\n for i in range(0, len(s), 2 * k):\n s[i:i + k] = reversed(s[i:i + k])\n return \"\".join(s)\n\n\nclass Solution:\n def reverseStr(self, s, k):\n s = list(s)\n for i in range(0, len(s), 2 * k):\n s[i:i + k] = reversed(s[i:i + k])\n return \"\".join(s)"
},
{
"alpha_fraction": 0.47596898674964905,
"alphanum_fraction": 0.5147286653518677,
"avg_line_length": 29.714284896850586,
"blob_id": "e109c59c8834e92feb56b7247ac0f4158be0f11e",
"content_id": "cb3da4a84cca2d45db976448e54b4088f22ab108",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 649,
"license_type": "no_license",
"max_line_length": 111,
"num_lines": 21,
"path": "/Week_03/17_letter_combination_of_a_phone_number.py",
"repo_name": "underseatravel/AlgorithmQIUZHAO",
"src_encoding": "UTF-8",
"text": "#!/usr/bin/python3\n# -*- coding: utf-8 -*-\n# @Time : 2020/7/27 19:29\n# @Author : weiyu\n# @File : 17_letter_combination_of_a_phone_number.py\n\n# 递归\nclass Solution:\n def letterCombinations(self, digits):\n self.dict = {\"2\":\"abc\", \"3\":\"def\", \"4\":\"ghi\", \"5\":\"jkl\", \"6\":\"mno\", \"7\":\"pqrs\", \"8\":\"tuv\", \"9\": \"wxyz\"}\n self.res = []\n if digits == \"\": return []\n self.recursion(digits, \"\")\n return self.res\n\n def recursion(self, digits, path):\n if digits == \"\":\n self.res.append(path)\n return\n for i in self.dict[digits[0]]:\n self.recursion(digits[1:], path + i)\n"
},
{
"alpha_fraction": 0.45116278529167175,
"alphanum_fraction": 0.539534866809845,
"avg_line_length": 20.5,
"blob_id": "e29fb76c8c8abaa42c223ea166bdf5c0cab8f540",
"content_id": "1273134ea6283c2cd9280cbd8ec8334db4c6eb60",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 215,
"license_type": "no_license",
"max_line_length": 41,
"num_lines": 10,
"path": "/Week_05/231_power_of_two.py",
"repo_name": "underseatravel/AlgorithmQIUZHAO",
"src_encoding": "UTF-8",
"text": "#!/usr/bin/python3\n# -*- coding: utf-8 -*-\n# @Time : 2020/8/12 23:16\n# @Author : weiyu\n# @File : 231_power_of_two.py\n\n\nclass Solution:\n def isPowerOfTwo(self, n):\n return n > 0 and n & (n - 1) == 0\n"
},
{
"alpha_fraction": 0.35008373856544495,
"alphanum_fraction": 0.40033501386642456,
"avg_line_length": 30.473684310913086,
"blob_id": "07287fdf6692884d4060905fd497e3782652b56a",
"content_id": "f2de946231ee638a0ca93746df2b97e0083f9145",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 597,
"license_type": "no_license",
"max_line_length": 96,
"num_lines": 19,
"path": "/Week_06/32_longest_valid_parentheses.py",
"repo_name": "underseatravel/AlgorithmQIUZHAO",
"src_encoding": "UTF-8",
"text": "#!/usr/bin/python3\n# -*- coding: utf-8 -*-\n# @Time : 2020/8/9 16:13\n# @Author : weiyu\n# @File : 32_longest_valid_parentheses.py\n\n\nclass Solution:\n def longestValidParentheses(self, s):\n if not s: return 0\n n = len(s)\n dp = [0 for _ in range(n)]\n for i in range(1, n):\n if s[i] == \")\":\n if s[i - 1] == \"(\":\n dp[i] = dp[i - 2] + 2\n elif s[i - 1] == \")\" and i - dp[i - 1] - 1 >= 0 and s[i - dp[i - 1] - 1] == \"(\":\n dp[i] = dp[i - 1] + 2 + dp[i - dp[i - 1] - 2]\n return max(dp)"
},
{
"alpha_fraction": 0.39590445160865784,
"alphanum_fraction": 0.47098976373672485,
"avg_line_length": 20,
"blob_id": "1742ac9edd480d50fc86308d736180cf8b116a1e",
"content_id": "2fe4c482ba59f6f59fa9434938a72d1912d7d2a7",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 293,
"license_type": "no_license",
"max_line_length": 38,
"num_lines": 14,
"path": "/Week_05/190_reverse_bits.py",
"repo_name": "underseatravel/AlgorithmQIUZHAO",
"src_encoding": "UTF-8",
"text": "#!/usr/bin/python3\n# -*- coding: utf-8 -*-\n# @Time : 2020/8/12 23:34\n# @Author : weiyu\n# @File : 190_reverse_bits.py\n\n\nclass Solution:\n def reverseBits(self, n):\n res = 0\n for i in range(32):\n res = (res << 1) + (n & 1)\n n >>= 1\n return res"
},
{
"alpha_fraction": 0.4655172526836395,
"alphanum_fraction": 0.517241358757019,
"avg_line_length": 26.352941513061523,
"blob_id": "23bb9a615a70b9ef8daeb2d297a5f28fcc994d7f",
"content_id": "082c1bc334e9a9a935c6986b9e67fb4c1e5b5b19",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 464,
"license_type": "no_license",
"max_line_length": 57,
"num_lines": 17,
"path": "/Week_06/56_merge_intervals.py",
"repo_name": "underseatravel/AlgorithmQIUZHAO",
"src_encoding": "UTF-8",
"text": "#!/usr/bin/python3\n# -*- coding: utf-8 -*-\n# @Time : 2020/8/18 20:46\n# @Author : weiyu\n# @File : 56_merge_intervals.py\n\n\nclass Solution:\n def mergeIntervals(self, intervals):\n intervals.sort(key = lambda x:x[0])\n res = []\n for interval in intervals:\n if not res or res[-1][1] < interval[0]:\n res.append(interval)\n else:\n res[-1][1] = max(res[-1][1], interval[1])\n return res"
},
{
"alpha_fraction": 0.4404761791229248,
"alphanum_fraction": 0.46875,
"avg_line_length": 25.8799991607666,
"blob_id": "51de70ed7def9038815ff6af9d120c14d304eb14",
"content_id": "0d83a2ee72cdfef1e13f11fe6b6d168a646df6f4",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 688,
"license_type": "no_license",
"max_line_length": 53,
"num_lines": 25,
"path": "/Week_06/5_longest_palindromic_substring.py",
"repo_name": "underseatravel/AlgorithmQIUZHAO",
"src_encoding": "UTF-8",
"text": "#!/usr/bin/python3\n# -*- coding: utf-8 -*-\n# @Time : 2020/7/19 10:41\n# @Author : weiyu\n# @File : 5_longest_palindromic_substring.py\n\n# 中心扩散法。时间O(n)\nclass Solution:\n def longestPalindrome(self, s):\n res = \"\"\n for i in range(len(s)):\n odd_str = self.palindrome(s, i, i)\n even_str = self.palindrome(s, i, i + 1)\n if len(odd_str) > len(res):\n res = odd_str\n if len (even_str) > len(res):\n res = even_str\n return res\n\n\n def palindrome(self, s, l, r):\n while l >= 0 and r < len(s) and s[l] == s[r]:\n l -= 1\n r += 1\n return s[l + 1:r]\n"
},
{
"alpha_fraction": 0.40409955382347107,
"alphanum_fraction": 0.433382123708725,
"avg_line_length": 23.39285659790039,
"blob_id": "f91f15340a3579b9207939310a390fb7a988a7d9",
"content_id": "65d7c194b608683364fc739fcaeb3346109af55c",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 683,
"license_type": "no_license",
"max_line_length": 43,
"num_lines": 28,
"path": "/Week_04/410_split_array_largest_sum.py",
"repo_name": "underseatravel/AlgorithmQIUZHAO",
"src_encoding": "UTF-8",
"text": "#!/usr/bin/python3\n# -*- coding: utf-8 -*-\n# @Time : 2020/8/9 21:02\n# @Author : weiyu\n# @File : 410_split_array_largest_sum.py\n\n\nclass Solution:\n def splitArray(self, nums, m):\n left = max(nums)\n right = sum(nums)\n while left < right:\n mid = (left + right) // 2\n if self.check(mid, nums, m):\n right = mid\n else:\n left = mid + 1\n return left\n\n def check(self, x, nums, m):\n total, cnt = 0, 1\n for num in nums:\n if total + num > x:\n cnt += 1\n total = num\n else:\n total += num\n return cnt <= m\n"
},
{
"alpha_fraction": 0.3659793734550476,
"alphanum_fraction": 0.434707909822464,
"avg_line_length": 26.714284896850586,
"blob_id": "c763bb2efe84a45052c47a7e8866d0f440d84473",
"content_id": "9bdb4e885037657efeb9404fcbc8d8aa13271f85",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 582,
"license_type": "no_license",
"max_line_length": 61,
"num_lines": 21,
"path": "/Week_04/552_student_attendance_record2.py",
"repo_name": "underseatravel/AlgorithmQIUZHAO",
"src_encoding": "UTF-8",
"text": "#!/usr/bin/python3\n# -*- coding: utf-8 -*-\n# @Time : 2020/8/9 21:57\n# @Author : weiyu\n# @File : 552_student_attendance_record2.py\n\n\nclass Solution:\n def checkRecord(self, n):\n if n == 1: return 3\n if n == 0: return 0\n mod = 10**9 + 7\n dp = [0 for _ in range(n + 1)]\n dp[0], dp[1], dp[2] = 1, 2, 4\n for i in range(3, n + 1):\n dp[i] = (dp[i - 1] + dp[i - 2] + dp[i - 3]) % mod\n res = dp[n]\n for i in range(1, n + 1):\n res += dp[i - 1] * dp[n - i] % mod\n res = res % mod\n return res\n"
},
{
"alpha_fraction": 0.4584774971008301,
"alphanum_fraction": 0.4913494884967804,
"avg_line_length": 22.15999984741211,
"blob_id": "abe3472442d73a320d0448bacb588056e0b7e3d4",
"content_id": "5e769f58bb326652b5dfe2f3ea6ee8ebb67c3e89",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 594,
"license_type": "no_license",
"max_line_length": 41,
"num_lines": 25,
"path": "/Week_03/55_jump_game2.py",
"repo_name": "underseatravel/AlgorithmQIUZHAO",
"src_encoding": "UTF-8",
"text": "#!/usr/bin/python3\n# -*- coding: utf-8 -*-\n# @Time : 2020/7/30 0:23\n# @Author : weiyu\n# @File : 55_jump_game2.py\n\n# 从前往后\nclass Solution:\n def canJump(self, nums):\n max_len = 0\n for i, n in enumerate(nums):\n if max_len < i:\n return False\n max_len = max(max_len, i + n)\n return True\n\n\n# 从后往前\nclass Solution:\n def canJump(self, nums):\n can_reach = len(nums) - 1\n for i in range(len(nums))[::-1]:\n if i + nums[i] >= can_reach:\n can_reach = i\n return can_reach == 0"
},
{
"alpha_fraction": 0.5610560774803162,
"alphanum_fraction": 0.5907590985298157,
"avg_line_length": 29.350000381469727,
"blob_id": "a9b37df19c9f83049a3a7823f5a0aa01fdc51927",
"content_id": "e899285e67c623e3d6876244eae50eb2a9c80246",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 606,
"license_type": "no_license",
"max_line_length": 77,
"num_lines": 20,
"path": "/Week_02/105_construst_binary_tree_from_preorder_and_inorder_traversal.py",
"repo_name": "underseatravel/AlgorithmQIUZHAO",
"src_encoding": "UTF-8",
"text": "#!/usr/bin/python3\n# -*- coding: utf-8 -*-\n# @Time : 2020/7/25 20:50\n# @Author : weiyu\n# @File : 105_construst_binary_tree_from_preorder_and_inorder_traversal.py\n\nclass TreeNode:\n def __init__(self, x):\n self.val = x\n self.left, self.right = None, None\n\n\nclass Solution:\n def buildTree(self, preorder, inorder):\n if inorder:\n idx = inorder.index(preorder.pop(0))\n root = TreeNode(inorder[idx])\n root.left = self.buildTree(preorder, inorder[:idx])\n root.right = self.buildTree(preorder, inorder[idx + 1:])\n return root"
},
{
"alpha_fraction": 0.4038461446762085,
"alphanum_fraction": 0.44505494832992554,
"avg_line_length": 27,
"blob_id": "da2d850d07112108e3a99113e4036774c23c0bed",
"content_id": "b89fa8618df76b9f59554f1aac98086dfeffd40f",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 728,
"license_type": "no_license",
"max_line_length": 86,
"num_lines": 26,
"path": "/Week_03/200_number_of_island.py",
"repo_name": "underseatravel/AlgorithmQIUZHAO",
"src_encoding": "UTF-8",
"text": "#!/usr/bin/python3\n# -*- coding: utf-8 -*-\n# @Time : 2020/7/29 16:58\n# @Author : weiyu\n# @File : 200_number_of_island.py\n\n# dfs\nclass Solution:\n def numIslands(self, grid):\n if not grid: return 0\n res = 0\n for i in range(len(grid)):\n for j in range(len(grid[0])):\n if grid[i][j] == \"1\":\n self.dfs(grid, i, j)\n res += 1\n return res\n\n def dfs(self, grid, i, j):\n if i < 0 or j < 0 or i >= len(grid) or j >= len(grid[0]) or grid[i][j] != \"1\":\n return\n grid[i][j] = \"0\"\n self.dfs(grid, i + 1, j)\n self.dfs(grid, i, j + 1)\n self.dfs(grid, i - 1, j)\n self.dfs(grid, i, j - 1)\n"
},
{
"alpha_fraction": 0.4387434422969818,
"alphanum_fraction": 0.4565444886684418,
"avg_line_length": 30.866666793823242,
"blob_id": "e3080af111b2a2f14c4aba8b75c478e9881fd062",
"content_id": "17b2fbc3dcf0fd9b3f98efd8f17285ef81a92995",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 955,
"license_type": "no_license",
"max_line_length": 78,
"num_lines": 30,
"path": "/Week_03/126_word_ladder2.py",
"repo_name": "underseatravel/AlgorithmQIUZHAO",
"src_encoding": "UTF-8",
"text": "#!/usr/bin/python3\n# -*- coding: utf-8 -*-\n# @Time : 2020/7/29 1:23\n# @Author : weiyu\n# @File : 126_word_ladder2.py\nimport collections\n\n# BFS\nclass Solution:\n def findLadders(self, beginWord, endWord, wordList):\n wordList = set(wordList)\n res = []\n layer = {}\n layer[beginWord] = [[beginWord]]\n\n while layer:\n newlayer = collections.defaultdict(list)\n for w in layer:\n if w == endWord:\n res.extend(k for k in layer[w])\n return res\n else:\n for i in range(len(w)):\n for c in \"abcdefghijklmnopqrstuvwxyz\":\n new = w[:i] + c + w[i + 1:]\n if new in wordList:\n newlayer[new] += [j + [new] for j in layer[w]]\n wordList -= set(newlayer.keys())\n layer = newlayer\n return res"
},
{
"alpha_fraction": 0.38352271914482117,
"alphanum_fraction": 0.4232954680919647,
"avg_line_length": 30.954545974731445,
"blob_id": "33d36c9697c7c9b293c2645b44ff331cff3b943d",
"content_id": "2d6d8d1b26ce7ad54322cb3941889a2ef6d0b7b2",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 704,
"license_type": "no_license",
"max_line_length": 88,
"num_lines": 22,
"path": "/Week_04/221_maximal_square.py",
"repo_name": "underseatravel/AlgorithmQIUZHAO",
"src_encoding": "UTF-8",
"text": "#!/usr/bin/python3\n# -*- coding: utf-8 -*-\n# @Time : 2020/8/8 23:35\n# @Author : weiyu\n# @File : 221_maximal_square.py\n\n\nclass Solution:\n def maximalSquare(self, matrix):\n if not matrix: return 0\n m, n = len(matrix), len(matrix[0])\n dp = [[0 for _ in range(n)] for _ in range(m)]\n maxside = 0\n for i in range(m):\n for j in range(n):\n if matrix[i][j] == \"1\":\n if i == 0 or j == 0:\n dp[i][j] = 1\n else:\n dp[i][j] = min(dp[i - 1][j], dp[i][j - 1], dp[i - 1][j - 1]) + 1\n maxside = max(maxside, dp[i][j])\n return maxside * maxside\n\n"
},
{
"alpha_fraction": 0.3728155195713043,
"alphanum_fraction": 0.43495145440101624,
"avg_line_length": 24.799999237060547,
"blob_id": "4f031c08854c25d61c075284fb993c40c9676a85",
"content_id": "002402da0c3dea7ff45e75e4352070155b54f996",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 515,
"license_type": "no_license",
"max_line_length": 58,
"num_lines": 20,
"path": "/Week_06/8_string_to_integer_atoi.py",
"repo_name": "underseatravel/AlgorithmQIUZHAO",
"src_encoding": "UTF-8",
"text": "#!/usr/bin/python3\n# -*- coding: utf-8 -*-\n# @Time : 2020/8/22 0:40\n# @Author : weiyu\n# @File : 8_string_to_integer_atoi.py\n\n\n\nclass Solution:\n def myAtoi(self, str):\n s = list(str.strip())\n if len(s) == 0: return 0\n sign = -1 if s[0] == \"-\" else 1\n if s[0] in [\"+\", \"-\"]:\n del s[0]\n res, i = 0, 0\n while i < len(s) and s[i].isdigit():\n res = res * 10 + int(s[i])\n i += 1\n return max(-2 ** 31, min(res * sign, 2 ** 31 - 1))"
},
{
"alpha_fraction": 0.5277777910232544,
"alphanum_fraction": 0.5625,
"avg_line_length": 26.0625,
"blob_id": "50bba1b031f6b26df64745a4488f051af8cc1198",
"content_id": "eed438eb66bb3f8d6ed0d4b4cd369408fb509b50",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 432,
"license_type": "no_license",
"max_line_length": 81,
"num_lines": 16,
"path": "/Week_02/429_n_ary_levelorder_traversal.py",
"repo_name": "underseatravel/AlgorithmQIUZHAO",
"src_encoding": "UTF-8",
"text": "#!/usr/bin/python3\n# -*- coding: utf-8 -*-\n# @Time : 2020/7/22 1:26\n# @Author : weiyu\n# @File : 429_n_ary_levelorder_traversal.py\n\n\nclass Solution:\n def levelOrder(self, root):\n if not root: return []\n queue = [root]\n res = []\n while queue:\n res.append([node.val for node in queue])\n queue = [child for node in queue for child in node.children if child]\n return res"
},
{
"alpha_fraction": 0.43148356676101685,
"alphanum_fraction": 0.45413362979888916,
"avg_line_length": 29.44827651977539,
"blob_id": "f93f9e9c1ae501de62b120816f4f7569e08a0b6e",
"content_id": "0f9d9fe967b28d0e1c599ce93b49653bfc3f49e2",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 933,
"license_type": "no_license",
"max_line_length": 58,
"num_lines": 29,
"path": "/Week_04/363_max_sum_of_rectangle_no_larger_than_k.py",
"repo_name": "underseatravel/AlgorithmQIUZHAO",
"src_encoding": "UTF-8",
"text": "#!/usr/bin/python3\n# -*- coding: utf-8 -*-\n# @Time : 2020/8/9 18:23\n# @Author : weiyu\n# @File : 363_max_sum_of_rectangle_no_larger_than_k.py\nimport bisect\n\n\n# 固定左右边界 + 最大子序和(一维数组存每行的和) 的思想\nclass Solution:\n def maxSumSubmatrix(self, matrix, k):\n if not matrix: return 0\n row, col = len(matrix), len(matrix[0])\n res = float(\"-inf\")\n for left in range(col):\n _sum = [0] * row\n for right in range(left, col):\n for i in range(row):\n _sum[i] += matrix[i][right]\n\n arr = [0]\n cur = 0\n for num in _sum:\n cur += num\n loc = bisect.bisect_left(arr, cur - k)\n if loc < len(arr):\n res = max(res, cur - arr[loc])\n bisect.insort(arr, cur)\n return res\n"
},
{
"alpha_fraction": 0.4765625,
"alphanum_fraction": 0.50390625,
"avg_line_length": 30.95833396911621,
"blob_id": "936478df66416e5a6c5c36491cc0fca6aac5964d",
"content_id": "357841677085e89b4bd2612f93baf8cef70a1e3f",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 768,
"license_type": "no_license",
"max_line_length": 57,
"num_lines": 24,
"path": "/Week_03/127_word_ladder.py",
"repo_name": "underseatravel/AlgorithmQIUZHAO",
"src_encoding": "UTF-8",
"text": "#!/usr/bin/python3\n# -*- coding: utf-8 -*-\n# @Time : 2020/7/28 23:36\n# @Author : weiyu\n# @File : 127_word_ladder.py\nimport collections\n\n# BFS\nclass Solution:\n def ladderLength(self, beginWord, endWord, wordList):\n bank = set(wordList)\n queue = collections.deque([(beginWord, 1)])\n if endWord not in wordList: return 0\n while queue:\n node, step = queue.popleft()\n if node == endWord:\n return step\n for i in range(len(node)):\n for c in \"abcdefghijklmnopqrstuvwxyz\":\n new = node[:i] + c + node[i + 1:]\n if new in bank:\n queue.append((new, step + 1))\n bank.remove(new)\n return 0\n\n"
},
{
"alpha_fraction": 0.49595141410827637,
"alphanum_fraction": 0.5425100922584534,
"avg_line_length": 26.5,
"blob_id": "a1e2262b3c5c17c9ff617ec26c28481ee0449bbf",
"content_id": "167436ddbe20a06d76ebcf25ebecd6e4f62afcd7",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 494,
"license_type": "no_license",
"max_line_length": 75,
"num_lines": 18,
"path": "/Week_03/122_best_time_to_buy_and_sell_stock2.py",
"repo_name": "underseatravel/AlgorithmQIUZHAO",
"src_encoding": "UTF-8",
"text": "#!/usr/bin/python3\n# -*- coding: utf-8 -*-\n# @Time : 2020/7/29 22:16\n# @Author : weiyu\n# @File : 122_best_time_to_buy_and_sell_stock2.py\n\n# greedy\nclass Solution:\n def maxProfit(self, prices):\n res = 0\n for i in range(1, len(prices)):\n if prices[i] > prices[i - 1]:\n res += prices[i] - prices[i - 1]\n return res\n\nclass Solution:\n def maxProfit(self, prices):\n return sum(b - a for a, b in zip(prices[:-1], prices[1:]) if b > a)"
},
{
"alpha_fraction": 0.3393213450908661,
"alphanum_fraction": 0.4091816246509552,
"avg_line_length": 25.421052932739258,
"blob_id": "b43c41c2b9ed00e8a110d49954944d7e885b8e83",
"content_id": "216ee8cf444ce8e605aebbdc3258ab2048efbb7b",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 501,
"license_type": "no_license",
"max_line_length": 43,
"num_lines": 19,
"path": "/Week_04/91_decode_ways.py",
"repo_name": "underseatravel/AlgorithmQIUZHAO",
"src_encoding": "UTF-8",
"text": "#!/usr/bin/python3\n# -*- coding: utf-8 -*-\n# @Time : 2020/8/8 22:55\n# @Author : weiyu\n# @File : 91_decode_ways.py\n\n\nclass Solution:\n def numDecodings(self, s):\n if not s or s[0] == \"0\": return 0\n dp = [0 for _ in range(len(s) + 1)]\n dp[0] = 1\n dp[1] = 1\n for i in range(2, len(s) + 1):\n if int(s[i - 1]) != 0:\n dp[i] += dp[i - 1]\n if 10 <= int(s[i - 2:i]) <= 26:\n dp[i] += dp[i - 2]\n return dp[-1]"
},
{
"alpha_fraction": 0.5071770548820496,
"alphanum_fraction": 0.5430622100830078,
"avg_line_length": 22.22222137451172,
"blob_id": "b9a81db4b2cbc8c805f7b2f2b4f8d4e61655b8d8",
"content_id": "e32d21e33c1d44d414dd82b032778634c8a73959",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 418,
"license_type": "no_license",
"max_line_length": 50,
"num_lines": 18,
"path": "/Week_02/144_binary_tree_preorder_traversal.py",
"repo_name": "underseatravel/AlgorithmQIUZHAO",
"src_encoding": "UTF-8",
"text": "#!/usr/bin/python3\n# -*- coding: utf-8 -*-\n# @Time : 2020/7/22 0:22\n# @Author : weiyu\n# @File : 144_binary_tree_preorder_traversal.py\n\n\nclass Solution:\n def preorderTraversal(self, root):\n res = []\n self.dfs(root, res)\n return res\n\n def dfs(self, root, res):\n if root:\n res.append(root.val)\n self.dfs(root.left, res)\n self.dfs(root.right, res)\n"
},
{
"alpha_fraction": 0.6716049313545227,
"alphanum_fraction": 0.6765432357788086,
"avg_line_length": 16.60869598388672,
"blob_id": "67af878ef9bafc9d9835f3af456343810fd189a7",
"content_id": "d87006a2126f31a6ac2be8892e1f95a9f3f4feba",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Markdown",
"length_bytes": 955,
"license_type": "no_license",
"max_line_length": 75,
"num_lines": 23,
"path": "/Week_03/README.md",
"repo_name": "underseatravel/AlgorithmQIUZHAO",
"src_encoding": "UTF-8",
"text": "学习笔记\n## 学习总结\n* 面试四步骤:确认题意,阐述所有解并分析时空间复杂度,写代码,测试。勿忘\n* 多总结题型,找相似点,熟能生巧\n* 自上而下的编程\n* 五毒神掌 \n* 熟练度,防BUG!\n\n\n### 分治,回溯\n* 常见题型:电话号码组合,N皇后等。\n* 本质为dfs递归。\n* 找重复性子问题。\n### DFS和BFS\n* 常见题型:树类题,括号生成,最小基因,单词接龙,岛屿,扫雷。\n* 具有套路,针对题型看是否加vsited。\n### 贪心\n* 常见题型:跳跃问题\n* 能够拆分为子问题,子问题最优解能递推到最终问题就有接。不能回退\n### 二分查找\n* 常见题型:有序搜索的问题,平方或根类问题。\n* 特点:有上下界,升序或降序,能索引。\n* 关于while i <= j: 部分是否取等号。当下方索引变换i = mid + 1且 j = mid - 1时取等号,否则不取等号,看具体题意。\n"
},
{
"alpha_fraction": 0.44111350178718567,
"alphanum_fraction": 0.5096359848976135,
"avg_line_length": 18.45833396911621,
"blob_id": "242a6644776f183342c4b724ad2b6a7d19f133fa",
"content_id": "157bacd5e2a967ebdd8d69be01d0be8f6d9b6dd5",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 467,
"license_type": "no_license",
"max_line_length": 57,
"num_lines": 24,
"path": "/Week_01/283_move_zeros.py",
"repo_name": "underseatravel/AlgorithmQIUZHAO",
"src_encoding": "UTF-8",
"text": "#!/usr/bin/python3\n# -*- coding: utf-8 -*-\n# @Time : 2020/7/16 11:22\n# @Author : weiyu\n# @File : 283_move_zeros.py\n\nclass Solution:\n def moveZeros(self, nums):\n zero = 0\n for i in range(len(nums)):\n if nums[i] != 0:\n nums[zero], nums[i] = nums[i], nums[zero]\n zero += 1\n\n\nt = Solution()\n\nnums = [0, 1, 0, 3, 12]\nt.moveZeros(nums)\nprint(nums)\n\nnums = [0, 1, 3, 0, 9, 0, 0]\nt.moveZeros(nums)\nprint(nums)\n"
},
{
"alpha_fraction": 0.3916083872318268,
"alphanum_fraction": 0.46503496170043945,
"avg_line_length": 20.769229888916016,
"blob_id": "de52d41ff3b4f32484c1afc0ae04065f25c297fb",
"content_id": "7191bdc360f864d50c002857bad7db9f56a6c6cd",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 286,
"license_type": "no_license",
"max_line_length": 36,
"num_lines": 13,
"path": "/Week_05/191_number_of_1_bits.py",
"repo_name": "underseatravel/AlgorithmQIUZHAO",
"src_encoding": "UTF-8",
"text": "#!/usr/bin/python3\n# -*- coding: utf-8 -*-\n# @Time : 2020/8/12 23:02\n# @Author : weiyu\n# @File : 191_number_of_1_bits.py\n\nclass Solution:\n def hammingWeight(self, n):\n sum = 0\n while n != 0:\n sum += 1\n n = n & (n - 1)\n return sum\n\n\n\n"
},
{
"alpha_fraction": 0.4065484404563904,
"alphanum_fraction": 0.46793997287750244,
"avg_line_length": 24.310344696044922,
"blob_id": "be7b514fb3029140b248b2df4486746c70284202",
"content_id": "da8e7c22b412e0c9989025a2b41a18ed24c6e352",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 741,
"license_type": "no_license",
"max_line_length": 53,
"num_lines": 29,
"path": "/Week_01/21_merge_two_sorted_lists.py",
"repo_name": "underseatravel/AlgorithmQIUZHAO",
"src_encoding": "UTF-8",
"text": "#!/usr/bin/python3\n# -*- coding: utf-8 -*-\n# @Time : 2020/7/18 17:31\n# @Author : weiyu\n# @File : 21_merge_two_sorted_lists.py\n\n\n# 递归\nclass Solution:\n def mergeTwoLists(self, l1, l2):\n if l1 and l2:\n if l1.val > l2.val: l1, l2 = l2, l1\n l1.next = self.mergeTwoLists(l1.next, l2)\n return l1 or l2\n\n# 迭代\n# class Solution:\n# def mergeTwoLists(self, l1, l2):\n# dummy = cur = ListNode(0)\n# while l1 and l2:\n# if l1.val < l2.val:\n# cur.next = l1\n# l1 = l1.next\n# else:\n# cur.next = l2\n# l2 = l2.next\n# cur = cur.next\n# cur.next = l1 or l2\n# return dummy.next"
},
{
"alpha_fraction": 0.343976765871048,
"alphanum_fraction": 0.37590712308883667,
"avg_line_length": 26.559999465942383,
"blob_id": "4b8d985f6eb74115de1a17d4f285153f64806909",
"content_id": "828062da80025a57119e5d939fe16360dce9f7e2",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 689,
"license_type": "no_license",
"max_line_length": 50,
"num_lines": 25,
"path": "/Week_03/33_search_in_rotated_sorted_array.py",
"repo_name": "underseatravel/AlgorithmQIUZHAO",
"src_encoding": "UTF-8",
"text": "#!/usr/bin/python3\n# -*- coding: utf-8 -*-\n# @Time : 2020/7/31 1:37\n# @Author : weiyu\n# @File : 33_search_in_rotated_sorted_array.py\n\n\nclass Solution:\n def search(self, nums, target):\n l, r = 0, len(nums) - 1\n while l <= r:\n mid = (l + r) // 2\n if nums[mid] == target:\n return mid\n if nums[l] <= nums[mid]:\n if nums[l] <= target <= nums[mid]:\n r = mid - 1\n else:\n l = mid + 1\n else:\n if nums[mid] <= target <= nums[r]:\n l = mid + 1\n else:\n r = mid - 1\n return -1\n"
},
{
"alpha_fraction": 0.3439065217971802,
"alphanum_fraction": 0.39899832010269165,
"avg_line_length": 29,
"blob_id": "307b79490b4ad1ce29f90f2251f8dfbb78539b2a",
"content_id": "f4e6dfa6d794ec12ff72bcf7822228b9b840ea2c",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 599,
"license_type": "no_license",
"max_line_length": 62,
"num_lines": 20,
"path": "/Week_06/115_distinct_subsequences.py",
"repo_name": "underseatravel/AlgorithmQIUZHAO",
"src_encoding": "UTF-8",
"text": "#!/usr/bin/python3\n# -*- coding: utf-8 -*-\n# @Time : 2020/8/20 1:39\n# @Author : weiyu\n# @File : 115_distinct_subsequences.py\n\n\nclass Solution:\n def numDistinct(self, s, t):\n m, n = len(t), len(s)\n dp = [[0 for _ in range(n + 1)] for _ in range(m + 1)]\n for j in range(n + 1):\n dp[0][j] = 1\n for i in range(1, m + 1):\n for j in range(1, n + 1):\n if t[i - 1] == s[j - 1]:\n dp[i][j] = dp[i - 1][j - 1] + dp[i][j - 1]\n else:\n dp[i][j] = dp[i][j - 1]\n return dp[-1][-1]"
},
{
"alpha_fraction": 0.5784499049186707,
"alphanum_fraction": 0.6030246019363403,
"avg_line_length": 30.176469802856445,
"blob_id": "55c49e068115728f874846ddd7626a6afcbb0ba0",
"content_id": "a61e84ab33ce2f46539523f6b1811754911ca120",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 529,
"license_type": "no_license",
"max_line_length": 59,
"num_lines": 17,
"path": "/Week_02/236_lowest_common_ancestor_of_a_binary_tree.py",
"repo_name": "underseatravel/AlgorithmQIUZHAO",
"src_encoding": "UTF-8",
"text": "#!/usr/bin/python3\n# -*- coding: utf-8 -*-\n# @Time : 2020/7/25 20:17\n# @Author : weiyu\n# @File : lowest_common_ancestor_of_a_binary_tree.py\n\n\nclass Solution:\n def lowestCommonAncestor(self, root, p, q):\n if not root: return None\n if p == root or q == root:\n return root\n left = self.lowestCommonAncestor(root.left, p, q)\n right = self.lowestCommonAncestor(root.right, p, q)\n if left and right: return root\n if not left: return right\n if not right: return left"
},
{
"alpha_fraction": 0.39289143681526184,
"alphanum_fraction": 0.4313160479068756,
"avg_line_length": 34.86206817626953,
"blob_id": "cc0f5b3f044f7cc285b85b7da7e58fc4e5bdff7c",
"content_id": "7f5ed0eee0d6d79e59d782061b6cedf220f056c3",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 1041,
"license_type": "no_license",
"max_line_length": 89,
"num_lines": 29,
"path": "/Week_05/130_surrounded_regions.py",
"repo_name": "underseatravel/AlgorithmQIUZHAO",
"src_encoding": "UTF-8",
"text": "#!/usr/bin/python3\n# -*- coding: utf-8 -*-\n# @Time : 2020/8/10 22:50\n# @Author : weiyu\n# @File : 130_surrounded_regions.py\n\n# DPS Time O(n^2) Space O(1)\nclass Solution:\n def solve(self, board):\n if not board: return []\n m, n = len(board), len(board[0])\n for i in range(m):\n if board[i][0] == \"O\": self.dfs(i, 0, board)\n if board[i][n - 1] == \"O\": self.dfs(i, n - 1, board)\n for j in range(n):\n if board[0][j] == \"O\": self.dfs(0, j, board)\n if board[m - 1][j] == \"O\": self.dfs(m - 1, j, board)\n for i in range(m):\n for j in range(n):\n if board[i][j] == \"O\": board[i][j] = \"X\"\n if board[i][j] == \"B\": board[i][j] = \"O\"\n\n def dfs(self, i, j, board):\n if i < 0 or j < 0 or i >= len(board) or j >= len(board[0]) or board[i][j] != \"O\":\n return\n board[i][j] = \"B\"\n for d in ((-1, 0), (1, 0), (0, -1), (0, 1)):\n x, y = i + d[0], j + d[1]\n self.dfs(x, y, board)\n\n"
},
{
"alpha_fraction": 0.42824074625968933,
"alphanum_fraction": 0.48379629850387573,
"avg_line_length": 20.649999618530273,
"blob_id": "68bc87ca6b5622238499577deaba281600f740f7",
"content_id": "d1e6e6acc0abbaa8931ffc2bc6ebc515551706c1",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 432,
"license_type": "no_license",
"max_line_length": 34,
"num_lines": 20,
"path": "/Week_05/70_climbing_stairs.py",
"repo_name": "underseatravel/AlgorithmQIUZHAO",
"src_encoding": "UTF-8",
"text": "#!/usr/bin/python3\n# -*- coding: utf-8 -*-\n# @Time : 2020/7/16 15:53\n# @Author : weiyu\n# @File : 70_climbing_stairs.py\n\nclass Solution:\n def climbStairs(self, n):\n if n == 1 or n ==2:\n return n\n a, b, temp = 1, 2, 0\n for i in range(3, n + 1):\n temp = a + b\n a = b\n b = temp\n return temp\n\nt = Solution()\nprint(t.climbStairs(2))\nprint(t.climbStairs(3))"
},
{
"alpha_fraction": 0.4320000112056732,
"alphanum_fraction": 0.48266667127609253,
"avg_line_length": 22.5,
"blob_id": "d66389788a5141db1cf52210ce3ba505be5320c4",
"content_id": "14a73c0e0834bf9c66ea90f1e7aafb19097c0123",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 375,
"license_type": "no_license",
"max_line_length": 54,
"num_lines": 16,
"path": "/Week_06/387_first_unique_character_in_a_string.py",
"repo_name": "underseatravel/AlgorithmQIUZHAO",
"src_encoding": "UTF-8",
"text": "#!/usr/bin/python3\n# -*- coding: utf-8 -*-\n# @Time : 2020/8/22 0:16\n# @Author : weiyu\n# @File : 387_first_unique_character_in_a_string.py\n\n\nclass Solution:\n def firstUniqChar(self, s):\n dic = {}\n for c in s:\n dic[c] = dic.get(c, 0) + 1\n for i, c in enumerate(s):\n if dic[c] == 1:\n return i\n return -1"
},
{
"alpha_fraction": 0.4444444477558136,
"alphanum_fraction": 0.47361111640930176,
"avg_line_length": 30.34782600402832,
"blob_id": "8939d4930d9252819b6f63584448d2bf88f72843",
"content_id": "bff21cdf98fe7a0a5cb773023c21402d95ce2e92",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 720,
"license_type": "no_license",
"max_line_length": 53,
"num_lines": 23,
"path": "/Week_05/433_minimum_genetic_mutation.py",
"repo_name": "underseatravel/AlgorithmQIUZHAO",
"src_encoding": "UTF-8",
"text": "#!/usr/bin/python3\n# -*- coding: utf-8 -*-\n# @Time : 2020/7/28 21:15\n# @Author : weiyu\n# @File : 433_minimum_genetic_mutation.py\nimport collections\n\nclass Solution:\n def minMutation(self, start, end, bank):\n bank = set(bank)\n if end not in bank: return -1\n queue = collections.deque([(start, 0)])\n while queue:\n node, step = queue.popleft()\n if node == end:\n return step\n for i in range(len(node)):\n for j in \"ACGT\":\n new = node[:i] + j + node[i + 1:]\n if new in bank:\n queue.append((new, step + 1))\n bank.remove(new)\n return -1"
},
{
"alpha_fraction": 0.490848571062088,
"alphanum_fraction": 0.5374376177787781,
"avg_line_length": 23,
"blob_id": "30fb69cab26b4a90c2302755f8a215dfaa5f5133",
"content_id": "4e4d58f4da24188e5239da010c0dd3d407e7115a",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 609,
"license_type": "no_license",
"max_line_length": 48,
"num_lines": 25,
"path": "/Week_06/242_valid_anagram.py",
"repo_name": "underseatravel/AlgorithmQIUZHAO",
"src_encoding": "UTF-8",
"text": "#!/usr/bin/python3\n# -*- coding: utf-8 -*-\n# @Time : 2020/7/17 23:58\n# @Author : weiyu\n# @File : 242_valid_anagram.py\n\n\n# 字典\nclass Solution:\n def isAnagram(self, s, t):\n dict1, dict2 = {}, {}\n for item in s:\n dict1[item] = dict1.get(item, 0) + 1\n for item in t:\n dict2[item] = dict2.get(item, 0) + 1\n return dict1 == dict2\n\n# # 暴力sort\n# class Solution:\n# def isAnagram(self, s, t):\n# return sorted(s) == sorted(t)\n\nt = Solution()\nprint(t.isAnagram(s = \"anagram\", t = \"nagaram\"))\nprint(t.isAnagram(s = \"anagram\", t = \"nasaram\"))\n\n"
},
{
"alpha_fraction": 0.4002954065799713,
"alphanum_fraction": 0.43722304701805115,
"avg_line_length": 20.1875,
"blob_id": "926696db28e7035c820ecfc966a5d3582edc138d",
"content_id": "0000a9002b933f29365a78d1c0f80467a22c2683",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 685,
"license_type": "no_license",
"max_line_length": 44,
"num_lines": 32,
"path": "/Week_03/50_powx_n.py",
"repo_name": "underseatravel/AlgorithmQIUZHAO",
"src_encoding": "UTF-8",
"text": "#!/usr/bin/python3\n# -*- coding: utf-8 -*-\n# @Time : 2020/7/27 16:26\n# @Author : weiyu\n# @File : 50_powx_n.py\n\n# 分治\nclass Solution:\n def myPow(self, x, n):\n if n < 0:\n return 1 / self.recursion(x, -n)\n else:\n return self.recursion(x, n)\n\n def recursion(self, x, n):\n if n == 0: return 1\n half = self.recursion(x, n // 2)\n if n % 2 == 0:\n return half * half\n else:\n return half * half * x\n\n# 暴力\nclass Solution:\n def myPow(self, x, n):\n if n < 0:\n n = -n\n x = 1 / x\n res = 1\n for i in range(n):\n res *= x\n return res"
},
{
"alpha_fraction": 0.4795221984386444,
"alphanum_fraction": 0.5085324048995972,
"avg_line_length": 21.423076629638672,
"blob_id": "cb632bc30a3884cad99455fcdde33ff2166d348d",
"content_id": "11921ef8d4a7620529469f4aaa9002a443649abc",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 594,
"license_type": "no_license",
"max_line_length": 61,
"num_lines": 26,
"path": "/Week_03/78_subsets.py",
"repo_name": "underseatravel/AlgorithmQIUZHAO",
"src_encoding": "UTF-8",
"text": "#!/usr/bin/python3\n# -*- coding: utf-8 -*-\n# @Time : 2020/7/27 17:02\n# @Author : weiyu\n# @File : 78_subsets.py\n\n# 迭代\nclass Solution:\n def subsets(self, nums):\n res = [[]]\n for num in nums:\n res = res + [[num] + sub for sub in res]\n return res\n\n\n# 递归\nclass Solution:\n def subsets(self, nums):\n res = []\n self.recursion(0, nums, [], res)\n return res\n\n def recursion(self, i, nums, tmp, res):\n res.append(tmp)\n for j in range(i, len(nums)):\n self.recursion(j + 1, nums, tmp + [nums[j]], res)\n\n\n\n"
},
{
"alpha_fraction": 0.43021345138549805,
"alphanum_fraction": 0.4712643623352051,
"avg_line_length": 29.450000762939453,
"blob_id": "ec7b5f363f3d0914136ec07f794f97b171d1fdec",
"content_id": "7464d75bcc4c4bc60088e24ed83b6a2d92f49f24",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 609,
"license_type": "no_license",
"max_line_length": 50,
"num_lines": 20,
"path": "/Week_06/438_find_all_anagram_in_a_string.py",
"repo_name": "underseatravel/AlgorithmQIUZHAO",
"src_encoding": "UTF-8",
"text": "#!/usr/bin/python3\n# -*- coding: utf-8 -*-\n# @Time : 2020/8/22 12:12\n# @Author : weiyu\n# @File : 438_find_all_anagram_in_a_string.py\nimport collections\n\nclass Solution:\n def findAnagrams(self, s, p):\n res = []\n pdic = collections.Counter(p)\n sdic = collections.Counter(s[:len(p) - 1])\n for i in range(len(p) - 1, len(s)):\n sdic[s[i]] += 1\n if sdic == pdic:\n res.append(i - len(p) + 1)\n sdic[s[i - len(p) + 1]] -= 1\n if sdic[s[i - len(p) + 1]] == 0:\n del sdic[s[i - len(p) + 1]]\n return res\n"
},
{
"alpha_fraction": 0.39901477098464966,
"alphanum_fraction": 0.44581282138824463,
"avg_line_length": 22.941177368164062,
"blob_id": "b606249bef0f716cc78013e7b10070479b2c1815",
"content_id": "6efc0b31fdb3f158f8b3ba772b1b608cf5756cde",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 406,
"license_type": "no_license",
"max_line_length": 56,
"num_lines": 17,
"path": "/Week_03/153_find_minimun_in_rotated_sorted_array.py",
"repo_name": "underseatravel/AlgorithmQIUZHAO",
"src_encoding": "UTF-8",
"text": "#!/usr/bin/python3\n# -*- coding: utf-8 -*-\n# @Time : 2020/7/31 2:50\n# @Author : weiyu\n# @File : 153_find_minimun_in_rotated_sorted_array.py\n\n\nclass Solution:\n def findMin(self, nums):\n i, j = 0, len(nums) - 1\n while i < j:\n mid = (i + j) // 2\n if nums[mid] > nums[j]:\n i = mid + 1\n else:\n j = mid\n return nums[i]"
},
{
"alpha_fraction": 0.37290501594543457,
"alphanum_fraction": 0.4203910529613495,
"avg_line_length": 30.173913955688477,
"blob_id": "57352846bca82fb29e67f44a802195de29f0d2d3",
"content_id": "f518d7b2d35f4e7b2cc9565f6562aca9bd03e65f",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 716,
"license_type": "no_license",
"max_line_length": 66,
"num_lines": 23,
"path": "/Week_03/874_walking_robot_simulation.py",
"repo_name": "underseatravel/AlgorithmQIUZHAO",
"src_encoding": "UTF-8",
"text": "#!/usr/bin/python3\n# -*- coding: utf-8 -*-\n# @Time : 2020/7/29 23:48\n# @Author : weiyu\n# @File : 874_walking_robot_simulation.py\n\n\nclass Solution:\n def robotSim(self, commands, obstacles):\n i = j = res = d = 0\n move = [(0, 1), (-1, 0), (0, -1), (1, 0)]\n obstacles = set(map(tuple, obstacles))\n for command in commands:\n if command == -2: d = (d + 1) % 4\n elif command == -1: d = (d - 1) % 4\n else:\n x, y = move[d]\n while command and (i + x, j + y) not in obstacles:\n i += x\n j += y\n command -= 1\n res = max(res, i ** 2 + j ** 2)\n return res"
},
{
"alpha_fraction": 0.3253493010997772,
"alphanum_fraction": 0.3972055912017822,
"avg_line_length": 25.394737243652344,
"blob_id": "251c33d835a3471ae2be4fe4d3bf89f183534589",
"content_id": "b6297c62a46223804f7c63631b6f7d1ee9f7843d",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 1034,
"license_type": "no_license",
"max_line_length": 49,
"num_lines": 38,
"path": "/Week_01/88_merge_sorted_array.py",
"repo_name": "underseatravel/AlgorithmQIUZHAO",
"src_encoding": "UTF-8",
"text": "#!/usr/bin/python3\n# -*- coding: utf-8 -*-\n# @Time : 2020/7/18 18:27\n# @Author : weiyu\n# @File : 88_merge_sorted_array.py\n\n\n# 双指针,从后往前\n# class Solution:\n# def merge(self, nums1, m, nums2, n):\n# while m > 0 and n > 0:\n# if nums1[m - 1] > nums2[n - 1]:\n# nums1[m + n - 1] = nums1[m - 1]\n# m -= 1\n# else:\n# nums1[m + n - 1] = nums2[n - 1]\n# n -= 1\n# if n > 0:\n# nums1[:n] = nums2[:n]\n\n\n# 双指针,从前往后\nclass Solution:\n def merge(self, nums1, m, nums2, n):\n nums1_copy = nums1[:m]\n nums1[:] = []\n p1 = p2 = 0\n while p1 < m and p2 < n:\n if nums1_copy[p1] < nums2[p2]:\n nums1.append(nums1_copy[p1])\n p1 += 1\n else:\n nums1.append(nums2[p2])\n p2 += 1\n if p1 < m:\n nums1[p1 + p2:] = nums1_copy[p1:]\n if p2 < n:\n nums1[p1 + p2:] = nums2[p2:]"
},
{
"alpha_fraction": 0.5484693646430969,
"alphanum_fraction": 0.5892857313156128,
"avg_line_length": 27,
"blob_id": "4120a9d537c7afa58adc0185cbee29bad1a12b86",
"content_id": "18e5ecc0432fd6ab2a0aafdec992898acc2ba625",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 392,
"license_type": "no_license",
"max_line_length": 63,
"num_lines": 14,
"path": "/Week_04/621_task_scheduler.py",
"repo_name": "underseatravel/AlgorithmQIUZHAO",
"src_encoding": "UTF-8",
"text": "#!/usr/bin/python3\n# -*- coding: utf-8 -*-\n# @Time : 2020/8/9 0:13\n# @Author : weiyu\n# @File : 621_task_scheduler.py\nimport collections\n\n# greedy\nclass Solution:\n def leastInterval(self, tasks, n):\n task_counts = list(collections.Counter(tasks).values())\n m = max(task_counts)\n mct = task_counts.count(m)\n return max(len(tasks), (m - 1) * (n + 1) + mct)\n"
},
{
"alpha_fraction": 0.40471869707107544,
"alphanum_fraction": 0.4537205100059509,
"avg_line_length": 28,
"blob_id": "43d159999cc5ca1806bab973f5e3a2ba108015f4",
"content_id": "f2c9c8343c6bc87435b3461f590ee347a74f7104",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 561,
"license_type": "no_license",
"max_line_length": 49,
"num_lines": 19,
"path": "/Week_04/403_frog_jump.py",
"repo_name": "underseatravel/AlgorithmQIUZHAO",
"src_encoding": "UTF-8",
"text": "#!/usr/bin/python3\n# -*- coding: utf-8 -*-\n# @Time : 2020/8/9 19:25\n# @Author : weiyu\n# @File : 403_frog_jump.py\n\n# DP Time O(n^2) Space O(n^2) -> hashmap最大可能到n^2\nclass Solution:\n def canCross(self, stones):\n d = dict((x, set()) for x in stones)\n if stones[1] != 1: return False\n\n d[1].add(1)\n for x in stones[:-1]:\n for j in d[x]:\n for k in range(j - 1, j + 2):\n if k > 0 and x + k in d:\n d[x + k].add(k)\n return d[stones[-1]] != set()\n"
},
{
"alpha_fraction": 0.7692307829856873,
"alphanum_fraction": 0.771019697189331,
"avg_line_length": 23.2608699798584,
"blob_id": "aeb472eb65ebea4a4a45fb3fbfdcea252fbd444b",
"content_id": "f6d88d7900ee6888dcba99ab7127032a5d519a21",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Markdown",
"length_bytes": 1439,
"license_type": "no_license",
"max_line_length": 70,
"num_lines": 23,
"path": "/Week_02/README.md",
"repo_name": "underseatravel/AlgorithmQIUZHAO",
"src_encoding": "UTF-8",
"text": "## 学习总结\n* 面试四步骤:阐述题目,分析各种解并说出时间空间复杂度,写代码,测试。这一周下意识都忘了分析题目并阐述时间复杂度的步骤,下周这方面要加强。\n* 感受到形成自己的思维逻辑也是很重要,可博采众长,但也需要有一些自己的思维框架,无论是算法原理还是代码框架都要自成系统。\n* 自上而下的编程方式利于思考,代码也易读。\n* 坚持五毒神掌很重要。\n\n\n### 树,二叉树,二叉搜索树\n* 常见题型:二叉树与N叉树前中后序遍历与层序遍历。\n* 前中后序遍历模板熟记。\n* 二叉搜索树,左子树结点均小于根节点,右子树节点均大于根节点,左右子树也分别为二叉搜索树。中序遍历是升序排列。\n* 无序树查、搜O(n),二叉搜索树O(nlogn)。\n\n### 堆\n* 常见题型:最大最小k个数,最高频的数等。可考虑维护长度为k的堆或长度为n的堆。\n* 查找O(1),删增o(logn);工业界多用斐波拉契堆。\n\n### 图\n* Graph(V,E)图有环路,DFS和BFS都需使用visited维护访问过的阶段\n### 递归\n* 常见题型:全排列类,组合,括号组合。(思维方式统一)\n递归必须有终止递归条件,之后才是处理逻辑并下探到下一层,切记。\n* 多考虑重复子问题,数学归纳法。没必要把问题复杂化,多总结规律就简单了。\n\n"
},
{
"alpha_fraction": 0.35181644558906555,
"alphanum_fraction": 0.4072657823562622,
"avg_line_length": 23.952381134033203,
"blob_id": "d95eb933b678316f02141f24706569025933b508",
"content_id": "ed5b5b280d2add72d06c05bec1b49b93ff76eef2",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 523,
"license_type": "no_license",
"max_line_length": 45,
"num_lines": 21,
"path": "/Week_03/860_lemonade_change.py",
"repo_name": "underseatravel/AlgorithmQIUZHAO",
"src_encoding": "UTF-8",
"text": "#!/usr/bin/python3\n# -*- coding: utf-8 -*-\n# @Time : 2020/7/29 20:53\n# @Author : weiyu\n# @File : 860_lemonade_change.py\n\n\nclass Solution:\n def lemonadeChange(self, bills):\n five = ten = 0\n for i in bills:\n if i == 5: five += 1\n elif i == 10: five -= 1; ten += 1\n elif i == 20:\n if ten:\n ten -= 1; five -= 1\n else:\n five -= 3\n if five < 0:\n return False\n return True"
},
{
"alpha_fraction": 0.43572986125946045,
"alphanum_fraction": 0.4749455451965332,
"avg_line_length": 26,
"blob_id": "922e71c9806ac849a697c26b3eb40f74aa79b194",
"content_id": "d171d9f4c2f542977ff12d3135a7307b8f868d1c",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 459,
"license_type": "no_license",
"max_line_length": 50,
"num_lines": 17,
"path": "/Week_06/300_longest_increasing_subsequence.py",
"repo_name": "underseatravel/AlgorithmQIUZHAO",
"src_encoding": "UTF-8",
"text": "#!/usr/bin/python3\n# -*- coding: utf-8 -*-\n# @Time : 2020/8/20 0:01\n# @Author : weiyu\n# @File : 300_longest_increasing_subsequence.py\n\nclass Solution:\n def lengthOfLIS(self, nums):\n if not nums:\n return 0\n n = len(nums)\n dp = [1 for _ in range(n)]\n for i in range(n):\n for j in range(i):\n if nums[i] > nums[j]:\n dp[i] = max(dp[i], dp[j] + 1)\n return max(dp)\n"
},
{
"alpha_fraction": 0.34188035130500793,
"alphanum_fraction": 0.404558390378952,
"avg_line_length": 30.954545974731445,
"blob_id": "102e0dc354503226d881642eb0ede6a8f56672f8",
"content_id": "f77062e7d35c6649d22eba9505eb62c95486aa91",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 702,
"license_type": "no_license",
"max_line_length": 84,
"num_lines": 22,
"path": "/Week_04/72_edit_distance.py",
"repo_name": "underseatravel/AlgorithmQIUZHAO",
"src_encoding": "UTF-8",
"text": "#!/usr/bin/python3\n# -*- coding: utf-8 -*-\n# @Time : 2020/8/8 11:34\n# @Author : weiyu\n# @File : 72_edit_distance.py\n\n\nclass Solution:\n def minDistance(self, word1, word2):\n m, n = len(word1), len(word2)\n dp = [[0 for _ in range(n + 1)] for _ in range(m + 1)]\n for i in range(1, m + 1):\n dp[i][0] = i\n for j in range(1, n + 1):\n dp[0][j] = j\n for i in range(1, m + 1):\n for j in range(1, n + 1):\n if word1[i - 1] == word2[j - 1]:\n dp[i][j] = dp[i - 1][j - 1]\n else:\n dp[i][j] = min(dp[i - 1][j - 1], dp[i - 1][j], dp[i][j - 1]) + 1\n return dp[-1][-1]"
},
{
"alpha_fraction": 0.45825931429862976,
"alphanum_fraction": 0.4991118907928467,
"avg_line_length": 21.559999465942383,
"blob_id": "ac779cd4d7c7dfe0df90593c549f6ced972d760e",
"content_id": "e30aa0573c9002e2ed975bbc30ce89e759646655",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 577,
"license_type": "no_license",
"max_line_length": 52,
"num_lines": 25,
"path": "/Week_01/66_plus_one.py",
"repo_name": "underseatravel/AlgorithmQIUZHAO",
"src_encoding": "UTF-8",
"text": "#!/usr/bin/python3\n# -*- coding: utf-8 -*-\n# @Time : 2020/7/18 19:32\n# @Author : weiyu\n# @File : 66_plus_one.py\n\n\n# int str转化法\nclass Solution:\n def plusOne(self, digits):\n num = int(\"\".join([str(c) for c in digits]))\n num += 1\n return [int(c) for c in str(num)]\n\n\n# 倒序遍历\nclass Solution:\n def plusOne(self, digits):\n for i in range(1, len(digits) + 1):\n if digits[-i] != 9:\n digits[-i] += 1\n return digits\n digits[-i] = 0\n digits.insert(0, 1)\n return digits"
},
{
"alpha_fraction": 0.7228346467018127,
"alphanum_fraction": 0.735433042049408,
"avg_line_length": 20.86206817626953,
"blob_id": "76373b886e0036f34df4901029d12a0543f89ac5",
"content_id": "835c87e2126a5419993fe4608f4d17be9dcd8839",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Markdown",
"length_bytes": 1553,
"license_type": "no_license",
"max_line_length": 70,
"num_lines": 29,
"path": "/Week_05/README.md",
"repo_name": "underseatravel/AlgorithmQIUZHAO",
"src_encoding": "UTF-8",
"text": "学习笔记\n## 学习总结\n* 对于一些难题感到分析时间复杂度困难,还是要多学习。专注,冷静坚持\n* 对于本周的新学习一些方法适当选择放弃,抓紧时间,老方法能做出题目也行\n* 面试四步骤:确认题意,阐述所有解并分析时空间复杂度,写代码,测试。勿忘\n* 多总结题型,找相似点,熟能生巧\n* 自上而下的编程\n* 五毒神掌 \n* 熟练度,防BUG!\n\n### 字典树和并查集\n* 特点: 两者皆有明显的套路,需反复学习熟记\n* 常见题型:单词搜索2 ; 朋友圈(并查集一般也能用dfs做)\n* 字典树:书上的每个节点放一个字符,python中一般用字典嵌套字典表示。优点查询效率高\n* 并查集:无他,唯需手熟\n\n### 高级搜索\n* 常见题型:\n* 剪枝:提前中断不可能的路径 if语句判断\n* 双向BFS: 头尾两端广度搜索,queue改用Set,优先处理长度短的set\n\n\n### 红黑树\n* AVL;平衡二叉搜索树,有平衡因子,四种旋转操作,结点存储空间需求大,旋转操作次数多。适用于多读少些的情况\n* 红黑树: 结点红或黑,根节点和叶子(空)结点为黑, 相邻两结点不全为红,任一结点到其叶子所有路径包含相同的结点数\n* AVL树查找更快,因为更平衡。红黑树插入删除更快。AVL树结点要保存平衡因子int,需求存储空间大,红黑树还行,只要1bit(0, 1)\n\n### 位运算\n* 最常用: n & (n - 1)去除最低位的0 判断奇偶 n & 1 == 1 \n"
},
{
"alpha_fraction": 0.3176795542240143,
"alphanum_fraction": 0.3674033284187317,
"avg_line_length": 29.20833396911621,
"blob_id": "a61af5f088aa8c680964986ed75b0e1216d184b0",
"content_id": "73ff69a9b9bb5b5ce56e1007d6ad48c38af237cd",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 724,
"license_type": "no_license",
"max_line_length": 66,
"num_lines": 24,
"path": "/Week_06/44_wildcard_matching.py",
"repo_name": "underseatravel/AlgorithmQIUZHAO",
"src_encoding": "UTF-8",
"text": "#!/usr/bin/python3\n# -*- coding: utf-8 -*-\n# @Time : 2020/8/22 17:41\n# @Author : weiyu\n# @File : 44_wildcard_matching.py\n\n\nclass Solution:\n def isMatch(self, s, p):\n m, n = len(s), len(p)\n dp = [[False for _ in range(n + 1)] for _ in range(m + 1)]\n dp[0][0] = True\n for j in range(1, n + 1):\n if p[j - 1] == \"*\":\n dp[0][j] = True\n else:\n break\n for i in range(1, m + 1):\n for j in range(1, n + 1):\n if p[j - 1] == \"*\":\n dp[i][j] = dp[i - 1][j] | dp[i][j - 1]\n elif p[j - 1] in [s[i - 1], \"?\"]:\n dp[i][j] = dp[i - 1][j - 1]\n return dp[-1][-1]"
},
{
"alpha_fraction": 0.451977401971817,
"alphanum_fraction": 0.4934086501598358,
"avg_line_length": 26.894737243652344,
"blob_id": "477a99934951d313a8c7b97d7ea85ac4b5e2309b",
"content_id": "002a979a299c4008de05a130f38e40882152f089",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 531,
"license_type": "no_license",
"max_line_length": 50,
"num_lines": 19,
"path": "/Week_02/264_ugly_number2.py",
"repo_name": "underseatravel/AlgorithmQIUZHAO",
"src_encoding": "UTF-8",
"text": "#!/usr/bin/python3\n# -*- coding: utf-8 -*-\n# @Time : 2020/7/23 17:35\n# @Author : weiyu\n# @File : 264_ugly_number2.py\nimport heapq\n\nclass Solution:\n def nthUglyNumber(self, n):\n heap = [1]\n visited = set([1])\n for i in range(n):\n val = heapq.heappop(heap)\n for factor in [2, 3, 5]:\n next_val = factor * val\n if next_val not in visited:\n heapq.heappush(heap, next_val)\n visited.add(next_val)\n return val\n\n"
},
{
"alpha_fraction": 0.3641025722026825,
"alphanum_fraction": 0.42820513248443604,
"avg_line_length": 21.764705657958984,
"blob_id": "f851bd52be85135fcc0751a7765528032c53a0c4",
"content_id": "4759f8d95df37ed05234d490c1a2cd4222ec7ebc",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 390,
"license_type": "no_license",
"max_line_length": 40,
"num_lines": 17,
"path": "/Week_05/338_counting_bits.py",
"repo_name": "underseatravel/AlgorithmQIUZHAO",
"src_encoding": "UTF-8",
"text": "#!/usr/bin/python3\n# -*- coding: utf-8 -*-\n# @Time : 2020/8/12 23:55\n# @Author : weiyu\n# @File : 338_counting_bits.py\n\n\n\nclass Solution:\n def countBits(self, num):\n dp = [0 for _ in range(num + 1)]\n for i in range(1, num + 1):\n if i & 1 == 1:\n dp[i] = dp[i - 1] + 1\n else:\n dp[i] = dp[i // 2]\n return dp\n\n\n\n"
},
{
"alpha_fraction": 0.7719298005104065,
"alphanum_fraction": 0.7719298005104065,
"avg_line_length": 27,
"blob_id": "22aca4f3199ba81a408c0f3a4c379df1d84d5420",
"content_id": "a93c32aa33932b2cd33d496073cb97de2b5703d3",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Markdown",
"length_bytes": 286,
"license_type": "no_license",
"max_line_length": 37,
"num_lines": 4,
"path": "/Week_04/README.md",
"repo_name": "underseatravel/AlgorithmQIUZHAO",
"src_encoding": "UTF-8",
"text": "\n## 学习总结\n* DP三步骤 :找出子问题(分治);定义状态数组 ; 找出DP方程\n* 简单和中等难度的DP一般DP方程比较好找,严格根据上方步骤多练即可做出\n* 困难难度的找出DP方程感觉很不容易,做完即忘,还是要多练\n\n"
},
{
"alpha_fraction": 0.43621399998664856,
"alphanum_fraction": 0.4691357910633087,
"avg_line_length": 19.85714340209961,
"blob_id": "7fc52d81fa5f8b942938eefea0ef3951a34aec13",
"content_id": "5c0ebe52d5c667ac7fbe9cce158b3b2c6d063b4d",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 745,
"license_type": "no_license",
"max_line_length": 47,
"num_lines": 35,
"path": "/Week_01/189_rotate_array.py",
"repo_name": "underseatravel/AlgorithmQIUZHAO",
"src_encoding": "UTF-8",
"text": "#!/usr/bin/python3\n# -*- coding: utf-8 -*-\n# @Time : 2020/7/18 16:39\n# @Author : weiyu\n# @File : 189_rotate_array.py\n\n\n# 三次翻转\nclass Solution:\n def rotate(self, nums, k):\n n = len(nums)\n k %= n\n self.reverse(nums, 0, n - k -1)\n self.reverse(nums, n - k, n - 1)\n self.reverse(nums, 0, n - 1)\n\n\n def reverse(self,nums, l, r):\n while l < r:\n nums[l], nums[r] = nums[r], nums[l]\n l, r = l + 1, r - 1\n\n\n# 切片\n# class Solution:\n# def rotate(self, nums, k):\n# n = len(nums)\n# nums[:] = nums[n - k:] + nums[:n - k]\n\n\n# 插入\n# class Solution:\n# def rotate(self, nums, k):\n# for _ in range(k):\n# nums.insert(0, nums.pop())"
},
{
"alpha_fraction": 0.4046434462070465,
"alphanum_fraction": 0.46268656849861145,
"avg_line_length": 26.454545974731445,
"blob_id": "9245839ffe70415cbddb74d4fdce82186aeb9880",
"content_id": "84d4d95b9465de57693c1ee14236574203405632",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 611,
"license_type": "no_license",
"max_line_length": 44,
"num_lines": 22,
"path": "/Week_06/1122_relative_sort_array.py",
"repo_name": "underseatravel/AlgorithmQIUZHAO",
"src_encoding": "UTF-8",
"text": "#!/usr/bin/python3\n# -*- coding: utf-8 -*-\n# @Time : 2020/8/18 19:58\n# @Author : weiyu\n# @File : 1122_relative_sort_array.py\n\n# 计数排序\nclass Solution:\n def relativeSortArray(self, arr1, arr2):\n arr = [0 for _ in range(1001)]\n res = []\n for i in range(len(arr1)):\n arr[arr1[i]] += 1\n for i in range(len(arr2)):\n while arr[arr2[i]] > 0:\n res.append(arr2[i])\n arr[arr2[i]] -= 1\n for i in range(len(arr)):\n while arr[i] > 0:\n res.append(i)\n arr[i] -= 1\n return res"
},
{
"alpha_fraction": 0.42431193590164185,
"alphanum_fraction": 0.4525993764400482,
"avg_line_length": 31.725000381469727,
"blob_id": "215594b3079076e947dadaa502e73befe8ad46ac",
"content_id": "d3078dce2433991b7bdd428be2b7fa4d4004738a",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 1308,
"license_type": "no_license",
"max_line_length": 61,
"num_lines": 40,
"path": "/Week_05/37_sudoku_solver.py",
"repo_name": "underseatravel/AlgorithmQIUZHAO",
"src_encoding": "UTF-8",
"text": "#!/usr/bin/python3\n# -*- coding: utf-8 -*-\n# @Time : 2020/8/12 19:12\n# @Author : weiyu\n# @File : 37_sudoku_solver.py\n\n\nclass Solution:\n def solveSudoku(self, board):\n self.row = [set(range(1, 10)) for _ in range(9)]\n self.col = [set(range(1, 10)) for _ in range(9)]\n self.block = [set(range(1, 10)) for _ in range(9)]\n self.empty = []\n for i in range(9):\n for j in range(9):\n if board[i][j] != \".\":\n val = int(board[i][j])\n self.row[i].remove(val)\n self.col[j].remove(val)\n self.block[(i//3)*3 + j//3].remove(val)\n else:\n self.empty.append((i, j))\n self.dfs(board)\n\n def dfs(self, board, iter = 0):\n if iter == len(self.empty):\n return True\n i, j = self.empty[iter]\n b = (i//3)*3 + j//3\n for val in self.row[i] & self.col[j] & self.block[b]:\n self.row[i].remove(val)\n self.col[j].remove(val)\n self.block[b].remove(val)\n board[i][j] = str(val)\n if self.dfs(board, iter + 1):\n return True\n self.row[i].add(val)\n self.col[j].add(val)\n self.block[b].add(val)\n return False"
},
{
"alpha_fraction": 0.39731284976005554,
"alphanum_fraction": 0.445297509431839,
"avg_line_length": 29.705883026123047,
"blob_id": "5d6a343eb69c99a39d8dae607735e1fdca84cb52",
"content_id": "f88b0199a84756d1d6df471fc90fd7c788b8ebd1",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 521,
"license_type": "no_license",
"max_line_length": 95,
"num_lines": 17,
"path": "/Week_04/312_burst_balloons.py",
"repo_name": "underseatravel/AlgorithmQIUZHAO",
"src_encoding": "UTF-8",
"text": "#!/usr/bin/python3\n# -*- coding: utf-8 -*-\n# @Time : 2020/8/9 23:53\n# @Author : weiyu\n# @File : 312_burst_balloons.py\n\n\nclass Solution:\n def maxCoins(self, nums):\n nums = [1] + nums + [1]\n n = len(nums)\n dp = [[0 for _ in range(n)] for _ in range(n)]\n for i in range(n - 2, -1, -1):\n for j in range(i + 1, n):\n for k in range(i + 1, j):\n dp[i][j] = max(dp[i][j], dp[i][k] + dp[k][j] + nums[i] * nums[j] * nums[k])\n return dp[0][-1]"
},
{
"alpha_fraction": 0.7733141779899597,
"alphanum_fraction": 0.77546626329422,
"avg_line_length": 30.68181800842285,
"blob_id": "f30b454001068d0d73cf1a0d87232058350ffecd",
"content_id": "2b53c2713b3f850b0aa82c070dafc185d4275ae2",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Markdown",
"length_bytes": 2520,
"license_type": "no_license",
"max_line_length": 122,
"num_lines": 44,
"path": "/Week_01/README.md",
"repo_name": "underseatravel/AlgorithmQIUZHAO",
"src_encoding": "UTF-8",
"text": "## 学习总结\n三分视频,七分练习。切碎知识点,可以练习,之后得到反馈。\n练习时:五毒神掌快速的五遍练习\n面试时:四件套(clarification,posible solution(time/space);coding;test cases\n\n### 数组\n记住常见长路和相关重要函数,常见套路各种双指针\n### 链表\n常见题型:常见解法基本都分为迭代法和递归法\n特殊题型:环型链表\n### 栈\n成对存在的符号考虑用栈;找左右边界也可考虑用栈(递增栈,递减栈)\n### 队列\n滑窗类相关\n### 哈希表\n异位词类;存在强对应关系\n\n## 感想\n执行五毒神掌,感觉效率高上许多。我觉得比较难的地方是看到题目如何能迅速判断用什么方式解决,要是方向错了写代码就比较困难。之后多练习多总结吧。还是要多练,过了2 3 天感觉可能又写不出来了。严格执行超哥的介绍的学习方法,之后就由时间来给出答案吧。\n\n## 简单题目\n[删除排序数组中的重复项(Facebook、字节跳动、微软在半年内面试中考过)](https://leetcode-cn.com/problems/remove-duplicates-from-sorted-array)\n\n[旋转数组(微软、亚马逊、PayPal 在半年内面试中考过)](https://leetcode-cn.com/problems/rotate-array/)\n\n[合并两个有序链表(亚马逊、字节跳动在半年内面试常考)](https://leetcode-cn.com/problems/merge-two-sorted-lists/)\n\n[合并两个有序数组(Facebook 在半年内面试常考)](https://leetcode-cn.com/problems/merge-sorted-array/)\n\n[两数之和(亚马逊、字节跳动、谷歌、Facebook、苹果、微软在半年内面试中高频常考)](https://leetcode-cn.com/problems/two-sum/)\n\n[移动零(Facebook、亚马逊、苹果在半年内面试中考过)](https://leetcode-cn.com/problems/move-zeroes/)\n\n[加一(谷歌、字节跳动、Facebook 在半年内面试中考过)](https://leetcode-cn.com/problems/plus-one/)\n\n[有效的字母异位词(亚马逊、Facebook、谷歌在半年内面试中考过)](https://leetcode-cn.com/problems/valid-anagram/description/)\n\n## 中等题目\n[设计循环双端队列(Facebook 在 1 年内面试中考过)](https://leetcode-cn.com/problems/design-circular-deque/)\n\n[字母异位词分组(亚马逊在半年内面试中常考)](https://leetcode-cn.com/problems/group-anagrams/)\n\n## 困难题目\n[接雨水(亚马逊、字节跳动、高盛集团、Facebook 在半年内面试常考)](https://leetcode-cn.com/problems/trapping-rain-water/)\n"
},
{
"alpha_fraction": 0.3826290965080261,
"alphanum_fraction": 0.4201878011226654,
"avg_line_length": 30.592592239379883,
"blob_id": "0d0a694c9c5e327279669fcc19390455372f5a0a",
"content_id": "c923efe8246f949fda131e38a85ec217c60b1868",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 852,
"license_type": "no_license",
"max_line_length": 90,
"num_lines": 27,
"path": "/Week_05/36_valid_suduku.py",
"repo_name": "underseatravel/AlgorithmQIUZHAO",
"src_encoding": "UTF-8",
"text": "#!/usr/bin/python3\n# -*- coding: utf-8 -*-\n# @Time : 2020/8/12 12:07\n# @Author : weiyu\n# @File : 36_valid_suduku.py\n\n\nclass Solution:\n def isValidSudoku(self, board):\n rows = [{} for _ in range(9)]\n cols = [{} for _ in range(9)]\n blocks = [{} for _ in range(9)]\n\n for i in range(9):\n for j in range(9):\n num = board[i][j]\n if num != \".\":\n num = int(num)\n block_idx = (i // 3) * 3 + j // 3\n\n rows[i][num] = rows[i].get(num, 0) + 1\n cols[j][num] = cols[j].get(num, 0) + 1\n blocks[block_idx][num] = blocks[block_idx].get(num, 0) + 1\n\n if rows[i][num] > 1 or cols[j][num] > 1 or blocks[block_idx][num] > 1:\n return False\n return True"
},
{
"alpha_fraction": 0.4633273780345917,
"alphanum_fraction": 0.4919499158859253,
"avg_line_length": 25.619047164916992,
"blob_id": "93a71e695a84d86418cdd0c1631e4d0c59dbfeb9",
"content_id": "65bc990d59147885e18e17540aaf038ca152bbfd",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 559,
"license_type": "no_license",
"max_line_length": 80,
"num_lines": 21,
"path": "/Week_05/52_n_queens2.py",
"repo_name": "underseatravel/AlgorithmQIUZHAO",
"src_encoding": "UTF-8",
"text": "#!/usr/bin/python3\n# -*- coding: utf-8 -*-\n# @Time : 2020/8/12 23:59\n# @Author : weiyu\n# @File : 52_n_queens2.py\n\n\n\nclass Solution:\n def totalNQueens(self, n):\n self.res = []\n self.dfs([], [], [], n)\n return len(self.res)\n\n def dfs(self, cols, pie, na, n):\n row = len(cols)\n if row == n:\n self.res.append(cols)\n for col in range(n):\n if col not in cols and row + col not in pie and row - col not in na:\n self.dfs(cols + [col], pie + [row + col], na + [row - col], n)\n"
},
{
"alpha_fraction": 0.43066665530204773,
"alphanum_fraction": 0.46133333444595337,
"avg_line_length": 22.4375,
"blob_id": "cc233f8d4519784011acf7c3ef0a0df23e65455d",
"content_id": "a0f78493a95ca88ab7bd3edf1c5d5b2280f827b5",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 762,
"license_type": "no_license",
"max_line_length": 49,
"num_lines": 32,
"path": "/Week_01/1_two_sum.py",
"repo_name": "underseatravel/AlgorithmQIUZHAO",
"src_encoding": "UTF-8",
"text": "#!/usr/bin/python3\n# -*- coding: utf-8 -*-\n# @Time : 2020/7/16 16:32\n# @Author : weiyu\n# @File : 1_two_sum.py\n\n# 暴力法\nclass Solution:\n def twoSum(self, nums, target):\n for i in range(len(nums)):\n for j in range(i + 1, len(nums)):\n if nums[i] + nums[j] == target:\n return [i, j]\n return []\n\n\n# 哈希表\n# class Solution:\n# def twoSum(self, nums, target):\n# if len(nums) <= 1:\n# return False\n# buff_dict = {}\n# for i in range(len(nums)):\n# if nums[i] in buff_dict:\n# return [buff_dict[nums[i]], i]\n# else:\n# buff_dict[target - nums[i]] = i\n\n\n\nt = Solution()\nprint(t.twoSum([2, 7, 11, 15], 9))\n"
},
{
"alpha_fraction": 0.4308560788631439,
"alphanum_fraction": 0.45531514286994934,
"avg_line_length": 26.28205108642578,
"blob_id": "fbd5bc98da6a5e08768dae97b59de395a87101fd",
"content_id": "4f8a76a0b5d18bee8a2147d332b6f6aa5644fcd0",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 1085,
"license_type": "no_license",
"max_line_length": 49,
"num_lines": 39,
"path": "/Week_03/74_search_a_2D_matrix.py",
"repo_name": "underseatravel/AlgorithmQIUZHAO",
"src_encoding": "UTF-8",
"text": "#!/usr/bin/python3\n# -*- coding: utf-8 -*-\n# @Time : 2020/7/31 2:20\n# @Author : weiyu\n# @File : 74_search_a_2D_matrix.py\n\n# 转化为一维\nclass Solution:\n def searchMatrix(self, matrix, target):\n nums = []\n for k in matrix:\n nums.extend(k)\n low, high = 0, len(nums) - 1\n while low <= high:\n mid = (low + high) // 2\n if nums[mid] == target:\n return True\n elif nums[mid] < target:\n low = mid + 1\n else:\n high = mid - 1\n return False\n\n# 二维二分查找\nclass Solution:\n def searchMatrix(self, matrix, target):\n if not matrix: return False\n rows, cols = len(matrix), len(matrix[0])\n low, high = 0, rows * cols - 1\n while low <= high:\n mid = (low + high) // 2\n num = matrix[mid // cols][mid % cols]\n if num == target:\n return True\n elif num > target:\n high = mid - 1\n else:\n low = mid + 1\n return False"
}
] | 85 |
minrk/appnope
|
https://github.com/minrk/appnope
|
5d3c7d5951a8eed7b785193d0bf0a301318d9027
|
38e1e1252c263e4f110b427fd13cb81ac6fe56cb
|
83f367e4006eb0aa403d2b52e84fdd2274e85bbf
|
refs/heads/main
| 2023-06-08T09:47:38.984028 | 2022-04-04T10:06:38 | 2022-04-04T10:06:38 | 13,947,252 | 48 | 10 |
NOASSERTION
| 2013-10-29T05:24:52 | 2021-11-09T05:33:37 | 2022-04-04T10:06:38 |
Python
|
[
{
"alpha_fraction": 0.6700680255889893,
"alphanum_fraction": 0.6700680255889893,
"avg_line_length": 18.600000381469727,
"blob_id": "bedd3bc625d07b9841a4ab3f2b2c0b12c5f3340a",
"content_id": "bc537db84b15aa33f5ac3d8d5cb17057a4e89034",
"detected_licenses": [
"BSD-2-Clause",
"BSD-1-Clause"
],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 294,
"license_type": "permissive",
"max_line_length": 68,
"num_lines": 15,
"path": "/tests/test_appnope.py",
"repo_name": "minrk/appnope",
"src_encoding": "UTF-8",
"text": "import sys\nimport appnope\n\n\ndef test_nope_scope():\n with appnope.nope_scope():\n pass\n\n\ndef test_nope():\n assert appnope.napping_allowed()\n appnope.nope()\n assert not appnope.napping_allowed() or sys.platform != \"Darwin\"\n appnope.nap()\n assert appnope.napping_allowed()\n"
},
{
"alpha_fraction": 0.5505245923995972,
"alphanum_fraction": 0.5615682005882263,
"avg_line_length": 28.68852424621582,
"blob_id": "533a8f12b614d5f7dc3b19510a8dafa0d8f810c7",
"content_id": "365e3d11f035c4d6cbfcaeefb91ee934fa31eb87",
"detected_licenses": [
"BSD-2-Clause",
"BSD-1-Clause"
],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 1811,
"license_type": "permissive",
"max_line_length": 99,
"num_lines": 61,
"path": "/setup.py",
"repo_name": "minrk/appnope",
"src_encoding": "UTF-8",
"text": "#-----------------------------------------------------------------------------\n# Copyright (C) 2013 Min RK\n#\n# Distributed under the terms of the 2-clause BSD License.\n#-----------------------------------------------------------------------------\n\nfrom __future__ import print_function\n\nimport sys\n\nfrom setuptools import setup\nfrom setuptools.command.bdist_egg import bdist_egg\n\n\nwith open('appnope/__init__.py') as f:\n for line in f:\n if line.startswith('__version__'):\n __version__ = eval(line.split('=', 1)[1])\n break\n\nclass bdist_egg_disabled(bdist_egg):\n \"\"\"Disabled version of bdist_egg\n\n Prevents setup.py install from performing setuptools' default easy_install,\n which it should never ever do.\n \"\"\"\n def run(self):\n sys.exit(\"Aborting implicit building of eggs. Use `pip install .` to install from source.\")\n\n\nwith open(\"README.md\") as f:\n readme = f.read()\n\n\nsetup_args = dict(\n name=\"appnope\",\n version=__version__,\n packages=[\"appnope\"],\n author=\"Min Ragan-Kelley\",\n author_email=\"[email protected]\",\n url=\"http://github.com/minrk/appnope\",\n description=\"Disable App Nap on macOS >= 10.9\",\n long_description=readme,\n long_description_content_type=\"text/markdown\",\n license = \"BSD\",\n cmdclass = {\n 'bdist_egg': bdist_egg if 'bdist_egg' in sys.argv else 'bdist_egg_disabled',\n },\n classifiers = [\n 'License :: OSI Approved :: BSD License',\n 'Operating System :: MacOS :: MacOS X',\n 'Programming Language :: Python :: 2',\n 'Programming Language :: Python :: 2.6',\n 'Programming Language :: Python :: 2.7',\n 'Programming Language :: Python :: 3',\n 'Programming Language :: Python :: 3.2',\n 'Programming Language :: Python :: 3.3',\n ],\n)\n\nsetup(**setup_args)\n"
}
] | 2 |
Diana-Matar/REDI-DWP-Monster-Game
|
https://github.com/Diana-Matar/REDI-DWP-Monster-Game
|
48aa28a099d96241d2e1e589af6e4c75c4b053b3
|
71104f2764edce8469b2e65fd3fa489f04eda4c0
|
621eade34d5ee9897558f39e688122ffac10cbfa
|
refs/heads/main
| 2023-05-15T04:47:14.604456 | 2021-05-29T12:01:54 | 2021-05-29T12:01:54 | 366,018,451 | 0 | 11 | null | 2021-05-10T11:25:21 | 2021-05-15T09:29:40 | 2021-05-29T12:01:54 |
Python
|
[
{
"alpha_fraction": 0.5641344785690308,
"alphanum_fraction": 0.7708593010902405,
"avg_line_length": 49.0625,
"blob_id": "911e54af167458a0d5b74fe42152821bd7434330",
"content_id": "08569e534dce9881caf1b19bab494c3387773846",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Markdown",
"length_bytes": 803,
"license_type": "no_license",
"max_line_length": 186,
"num_lines": 16,
"path": "/README.md",
"repo_name": "Diana-Matar/REDI-DWP-Monster-Game",
"src_encoding": "UTF-8",
"text": "\n# REDI-DWP-Monster-Game\n\n# Project introduction \nLet's build a game in Python where we as players fight the monster! In the game, we create a player name, attack the monster, heal yourself, and display high scores from previous rounds.\n\n# How To Play\nPlayer vs Monster\n\n<img width=\"737\" alt=\"Screenshot 2021-05-10 at 13 26 50\" src=\"https://user-images.githubusercontent.com/11426981/117652559-9d29c000-b193-11eb-858b-360f4c733f2c.png\">\n\n# Attack Game\n\n<img width=\"751\" alt=\"Screenshot 2021-05-10 at 13 27 00\" src=\"https://user-images.githubusercontent.com/11426981/117652744-d5310300-b193-11eb-89db-89990192473e.png\">\n\n# How to attack\n<img width=\"717\" alt=\"Screenshot 2021-05-10 at 13 27 12\" src=\"https://user-images.githubusercontent.com/11426981/117652681-be8aac00-b193-11eb-89f5-51cd1d9c4ba4.png\">\n\n"
},
{
"alpha_fraction": 0.594793438911438,
"alphanum_fraction": 0.607809841632843,
"avg_line_length": 29.465517044067383,
"blob_id": "b1e04990ddf992200473ab91ba2318315cf18636",
"content_id": "a8d80acbb278c9df7a72406467c35102105b8616",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 1767,
"license_type": "no_license",
"max_line_length": 171,
"num_lines": 58,
"path": "/main.py",
"repo_name": "Diana-Matar/REDI-DWP-Monster-Game",
"src_encoding": "UTF-8",
"text": "from random import randint\ngame_running = True\n\ndef calculate_monster_attack(attack_min, attack_max):\n return randint(attack_min, attack_max)\n\nprint('Welcome to Monster Game!!')\nwhile game_running == True:\n new_round = True\n player = {'name': 'Diana', 'attack': 10, 'heal': 16, 'health': 100}\n monster = {'name': 'Max', 'attack_min': 10, 'attack_max': 20, 'health': 100}\n\n while new_round == True:\n \n player_won = False\n monster_won = False\n\n print('Please select action')\n print('1) Attack')\n print('2) Heal')\n print('3) Exit Game')\n\n player_choice = input()\n\n if player_choice == '1':\n monster['health'] = monster['health'] - player['attack']\n if monster['health'] <= 0:\n player_won = True\n print('Player Won...')\n\n player['health'] = player['health'] - calculate_monster_attack(monster['attack_min'], monster['attack_max'])\n if player['health'] <= 0:\n monster_won = True\n print('Monster Won...')\n\n\n elif player_choice == '2':\n player['health'] = player['health'] + player['heal']\n \n player['health'] = player['health'] - calculate_monster_attack(monster['attack_min'], monster['attack_max']) \n if player['health'] <= 0:\n monster_won = True\n print('Monster Won...')\n\n\n elif player_choice == '3':\n new_round = False\n game_running = False\n\n else:\n print('Invalid Input') \n\n print('Player heakth:' + str(player['health']))\n print('Monster heakth:' + str(monster['health'])) \n\n # Adding exit condition should keep the game running untill either player or monste win the game (health of either player or monster equal to or smaller than zero). \n if player_won == True or monster_won == True:\n new_round = False\n"
}
] | 2 |
Mansur1010/maneta_python
|
https://github.com/Mansur1010/maneta_python
|
c704ae2843156d95fb0745d06a7a2975a94274ce
|
3a713d1e205815fc312173d520460cf4bafb61ca
|
a8ad8a6949279a0d5e0277b72b8059e7d98bce31
|
refs/heads/master
| 2023-06-03T07:48:10.867366 | 2021-06-18T08:55:13 | 2021-06-18T08:55:13 | 378,085,777 | 0 | 0 | null | null | null | null | null |
[
{
"alpha_fraction": 0.6451489925384521,
"alphanum_fraction": 0.654316246509552,
"avg_line_length": 39.489688873291016,
"blob_id": "6756ac266be50235b2476a817a36c2dc4ebca33f",
"content_id": "fae5e50371a23ada16a92f7e777f900b4cbdb2a8",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 8656,
"license_type": "no_license",
"max_line_length": 115,
"num_lines": 194,
"path": "/Мансур.py",
"repo_name": "Mansur1010/maneta_python",
"src_encoding": "UTF-8",
"text": "import telebot\nfrom telebot import types\nimport requests\n\nTOKEN = '1720458307:AAFhGB66NVV35UgdvsJhmrRgUprmuoZxEpI'\n\nWEATHER_TOKEN = 'ec92b34a0720e9ae01f7c775538e6589'\n\nbot = telebot.TeleBot(TOKEN)\n\[email protected]_handler(commands=['test', 'start', 'help', 'weather', 'film', 'find', 'profile', 'calculate'])\ndef start_bot(message):\n if message.text.lower() == '/start':\n keyboard = types.InlineKeyboardMarkup()\n btn = types.InlineKeyboardButton('Шутка',callback_data= 'joke')\n google = types.InlineKeyboardButton(\n 'гугл',\n url='https://google.ru')\n test = types.InlineKeyboardButton('Тест', callback_data='test')\n keyboard.add(btn)\n keyboard.add(google)\n keyboard.add(test)\n \n bot.send_message(message.chat.id,\n 'всем привет!\\n Я новый бот',\n reply_markup=keyboard)\n \n\n if message.text.lower() == '/start':\n bot.send_message(message.chat.id, 'вот список команд\\n/help\\n/weather\\n/film\\n/find\\n/profile\\n/calculate')\n \n \n elif message.text.lower() == '/weather':\n bot.send_message(message.chat.id, 'вы в разделе погода')\n bot.send_message(message.chat.id, 'введите названия города')\n bot.register_next_step_handler(message, weather_menu)\n \n elif message.text.lower() == '/help':\n bot.send_message(message.chat.id, 'вы сами разберетесь')\n bot.send_message(message.chat.id, 'я в вас верю')\n \n elif message.text.lower() == '/film':\n bot.send_message(message.chat.id, 'какой жанр вы хотите посмотреть')\n \n elif message.text.lower() == '/find':\n bot.send_message(message.chat.id, 'что вы хотите найти?')\n \n elif message.text.lower() == '/profile':\n bot.send_message(message.chat.id, 'вы хотите заполнить профиль?')\n bot.send_message(message.chat.id, 'тогда скажите свое имя')\n bot.register_next_step_handler(message, enter_name)\n \n elif message.text.lower() == '/calculate':\n keyboard = types.ReplyKeyboardMarkup(resize_keyboard=True)\n btn1 = types.KeyboardButton('плюс')\n btn2 = types.KeyboardButton('минус')\n btn3 = types.KeyboardButton('делить')\n btn4 = types.KeyboardButton('умножить')\n btn5 = types.KeyboardButton('степень')\n keyboard.add(btn1)\n keyboard.add(btn2)\n keyboard.add(btn3)\n keyboard.add(btn4)\n keyboard.add(btn5)\n bot.send_message(message.chat.id,\n 'выберите действие:',\n reply_markup=keyboard)\n bot.register_next_step_handler(message, calculate_choose)\ndef calculate_choose(message):\n if message.text.lower() == 'плюс':\n bot.send_message(message.chat.id, \"что вы хотите сложить?\")\n bot.send_message(message.chat.id, \"введите два числа через пробел\")\n bot.register_next_step_handler(message, calculate_result_plus)\n elif message.text.lower() == 'минус':\n bot.send_message(message.chat.id, \"что вы хотите вычеслить?\")\n bot.send_message(message.chat.id, \"введите два числа через пробел\")\n bot.register_next_step_handler(message, calculate_result_minus)\n elif message.text.lower() == 'делить':\n bot.send_message(message.chat.id, \"что вы хотите разделить\")\n bot.send_message(message.chat.id, \"введите два числа через пробел\")\n bot.register_next_step_handler(message, calculate_result_divide)\n elif message.text.lower() == 'умножить':\n bot.send_message(message.chat.id, \"что вы хотите умножить?\")\n bot.send_message(message.chat.id, \"введите два числа через пробел\")\n bot.register_next_step_handler(message, calculate_result_multiply)\n elif message.text.lower() == 'степень':\n bot.send_message(message.chat.id, \"что вы хотите возвести в степень?\")\n bot.send_message(message.chat.id, \"введите два числа через пробел\")\n bot.register_next_step_handler(message, calculate_result_stepen)\ndef calculate_result_plus(message):\n nums = message.text.split()\n num1 = int(nums[0])\n num2 = int(nums[1])\n bot.send_message(message.chat.id, f\"результат {num1 + num2}\")\ndef calculate_result_minus(message):\n nums = message.text.split()\n num1 = int(nums[0])\n num2 = int(nums[1])\n bot.send_message(message.chat.id, f\"результат {num1 - num2}\")\ndef calculate_result_divide(message):\n nums = message.text.split()\n num1 = int(nums[0])\n num2 = int(nums[1])\n bot.send_message(message.chat.id, f\"результат {num1 / num2}\")\ndef calculate_result_multiply(message):\n nums = message.text.split()\n num1 = int(nums[0])\n num2 = int(nums[1])\n bot.send_message(message.chat.id, f\"результат {num1 * num2}\")\ndef calculate_result_stepen(message):\n nums = message.text.split()\n num1 = int(nums[0])\n num2 = int(nums[1])\n bot.send_message(message.chat.id, f\"результат {num1 ** num2}\")\n\[email protected]_handler(content_type=['text'])\ndef enter_name(message):\n name = message.text\n bot.send_message(message.chat.id, f\"твое имя {name}\")\n bot.send_message(message.chat.id, f\"какой у тебя возраст?\")\n bot.register_next_step_handler(message, enter_age)\[email protected]_handler(content_type=['text'])\ndef enter_age(message):\n vozrast = message.text\n bot.send_message(message.chat.id, f\"тебе {vozrast} лет\")\n bot.send_message(message.chat.id, f\"какой у тебя номер телефона?\")\n bot.register_next_step_handler(message, enter_number)\[email protected]_handler(content_type=['text'])\ndef enter_number(message):\n nomer = message.text\n bot.send_message(message.chat.id, f\"твой номер телефона: {nomer}\")\n bot.send_message(message.chat.id, f\"где вы живете\")\n bot.register_next_step_handler(message, enter_place)\[email protected]_handler(content_type=['text'])\ndef enter_place(message):\n zhizn = message.text\n bot.send_message(message.chat.id, f\"вы живете в {zhizn}\")\n bot.send_message(message.chat.id, f\"какой у вас любимый фильм?\")\n bot.register_next_step_handler(message, enter_film)\[email protected]_handler(content_type=['text'])\ndef enter_film(message):\n kino = message.text\n bot.send_message(message.chat.id, f\"ваш любимый фильм {kino}\")\n bot.send_message(message.chat.id, f\"вы создали свой профиль\")\n \n \[email protected]_query_handler(func=lambda x: x.data == 'joke')\ndef joke_fn(message):\n bot.send_message(message.from_user.id, 'https://youtu.be/JyD13ifbAN4')\n \n \[email protected]_query_handler(func=lambda x: x.data == 'test')\ndef test_btn(message):\n keyboard = types.InlineKeyboardMarkup()\n btn7 = types.InlineKeyboardButton('100 м',callback_data='ere1')\n btn8 = types.InlineKeyboardButton('600 м', callback_data='ere2')\n btn9 = types.InlineKeyboardButton('700 м', callback_data='ere1')\n keyboard.add(btn7)\n keyboard.add(btn8)\n keyboard.add(btn9)\n bot.send_message(message.from_user.id, 'Вопрос: Cколько метров эйфелевая башня?', reply_markup=keyboard)\n\[email protected]_query_handler(func=lambda x: x.data == 'ere1')\ndef answer1(message):\n bot.send_message(message.from_user.id, 'вы идиот!')\[email protected]_query_handler(func=lambda x: x.data == 'ere2')\ndef answer2(message):\n bot.send_message(message.from_user.id, 'верный ответ')\n\n\ndef weather_menu(message):\n city = message.text\n API_URL = f'https://api.openweathermap.org/data/2.5/weather?q={city}&appid={WEATHER_TOKEN}'\n r = requests.get(API_URL)\n w = r.json()\n bot.send_message(message.chat.id, f\"в городе: {w['name']}\")\n bot.send_message(message.chat.id, f\"Температура: {w['main']['temp']-273.15}\")\n bot.send_message(message.chat.id, f'Скорость ветра: {w[\"wind\"][\"speed\"]}м/c')\n bot.send_message(message.chat.id, f'Давление: {w[\"main\"][\"pressure\"]}Па' )\n bot.send_message(message.chat.id, f'Влажность: {w [\"main\"][\"humidity\"]}%')\n \n \n \n \n \n \n \n \n \n \n \n \n \nbot.polling()"
}
] | 1 |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.